author     Dmitrii Agibov <dmitrii.agibov@arm.com>        2023-01-27 09:12:50 +0000
committer  Benjamin Klimczak <benjamin.klimczak@arm.com>  2023-02-08 15:25:11 +0000
commit     3e3dcb9bd5abb88adcd85b4f89e8a81e7f6fa293 (patch)
tree       020eee6abef093113de5b49c135c915c37173843
parent     836efd40317a397761ec8b66e3f4398faac43ad0 (diff)
download   mlia-3e3dcb9bd5abb88adcd85b4f89e8a81e7f6fa293.tar.gz
MLIA-595 Remove old backend configuration mechanism
- Remove old backend configuration code
- Install backends into directory ~/.mlia
- Rename targets/backends in the registry to make naming consistent
  across the codebase.

Change-Id: I9c8b012fe863280f1c692940c0dcad3ef638aaae
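As a point of reference, a minimal sketch of how the relocated backend repository under ~/.mlia can be queried once a Corstone backend has been installed. get_backend_repository() and get_backend_settings() come from the new src/mlia/backend/repo.py referenced below; the backend name matches the diff, while the printed values and exact on-disk layout are illustrative assumptions only.

    from mlia.backend.repo import get_backend_repository

    # Backends are now registered in a repository rooted at ~/.mlia
    # (hypothetical example; assumes Corstone-300 was installed beforehand).
    backend_repo = get_backend_repository()
    backend_path, settings = backend_repo.get_backend_settings("Corstone-300")

    print(backend_path)             # resolved installation directory under ~/.mlia
    print(settings.get("profile"))  # "default" or "AVH", set by the path checkers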
-rw-r--r--  setup.cfg | 1
-rw-r--r--  src/mlia/backend/corstone/__init__.py | 7
-rw-r--r--  src/mlia/backend/corstone/install.py | 57
-rw-r--r--  src/mlia/backend/corstone/performance.py | 365
-rw-r--r--  src/mlia/backend/errors.py | 6
-rw-r--r--  src/mlia/backend/executor/__init__.py | 3
-rw-r--r--  src/mlia/backend/executor/application.py | 170
-rw-r--r--  src/mlia/backend/executor/common.py | 517
-rw-r--r--  src/mlia/backend/executor/config.py | 68
-rw-r--r--  src/mlia/backend/executor/execution.py | 342
-rw-r--r--  src/mlia/backend/executor/fs.py | 88
-rw-r--r--  src/mlia/backend/executor/output_consumer.py | 67
-rw-r--r--  src/mlia/backend/executor/proc.py | 191
-rw-r--r--  src/mlia/backend/executor/runner.py | 98
-rw-r--r--  src/mlia/backend/executor/source.py | 207
-rw-r--r--  src/mlia/backend/executor/system.py | 178
-rw-r--r--  src/mlia/backend/install.py | 250
-rw-r--r--  src/mlia/backend/manager.py | 12
-rw-r--r--  src/mlia/backend/repo.py | 190
-rw-r--r--  src/mlia/backend/tosa_checker/__init__.py | 2
-rw-r--r--  src/mlia/cli/command_validators.py | 8
-rw-r--r--  src/mlia/cli/config.py | 53
-rw-r--r--  src/mlia/cli/main.py | 2
-rw-r--r--  src/mlia/cli/options.py | 8
-rw-r--r--  src/mlia/resources/backend_configs/systems/SYSTEMS.txt | 9
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json | 72
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json.license | 3
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json | 72
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json.license | 3
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json | 72
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json.license | 3
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json | 72
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json.license | 3
-rw-r--r--  src/mlia/resources/backends/applications/APPLICATIONS.txt | 4
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U55-Default-noTA/backend-config.json | 14
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U55-Default-noTA/backend-config.json.license | 3
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U65-Default-noTA/backend-config.json | 14
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U65-Default-noTA/backend-config.json.license | 3
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U55-Default-noTA/backend-config.json | 14
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U55-Default-noTA/backend-config.json.license | 3
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U65-Default-noTA/backend-config.json | 14
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U65-Default-noTA/backend-config.json.license | 3
-rw-r--r--  src/mlia/resources/backends/systems/.gitignore | 6
-rw-r--r--  src/mlia/target/cortex_a/__init__.py | 4
-rw-r--r--  src/mlia/target/ethos_u/__init__.py | 6
-rw-r--r--  src/mlia/target/ethos_u/performance.py | 26
-rw-r--r--  src/mlia/target/registry.py | 18
-rw-r--r--  src/mlia/target/tosa/__init__.py | 4
-rw-r--r--  src/mlia/utils/proc.py | 55
-rw-r--r--  tests/conftest.py | 74
-rw-r--r--  tests/test_backend_corstone.py | 11
-rw-r--r--  tests/test_backend_corstone_install.py | 483
-rw-r--r--  tests/test_backend_corstone_performance.py | 589
-rw-r--r--  tests/test_backend_executor_application.py | 422
-rw-r--r--  tests/test_backend_executor_common.py | 482
-rw-r--r--  tests/test_backend_executor_execution.py | 212
-rw-r--r--  tests/test_backend_executor_fs.py | 138
-rw-r--r--  tests/test_backend_executor_output_consumer.py | 100
-rw-r--r--  tests/test_backend_executor_proc.py | 190
-rw-r--r--  tests/test_backend_executor_runner.py | 254
-rw-r--r--  tests/test_backend_executor_source.py | 205
-rw-r--r--  tests/test_backend_executor_system.py | 358
-rw-r--r--  tests/test_backend_install.py | 240
-rw-r--r--  tests/test_backend_manager.py | 59
-rw-r--r--  tests/test_backend_registry.py | 20
-rw-r--r--  tests/test_backend_repo.py | 140
-rw-r--r--  tests/test_cli_command_validators.py | 16
-rw-r--r--  tests/test_cli_config.py | 10
-rw-r--r--  tests/test_resources/application_config.json | 94
-rw-r--r--  tests/test_resources/application_config.json.license | 3
-rw-r--r--  tests/test_resources/backends/applications/application1/backend-config.json | 29
-rw-r--r--  tests/test_resources/backends/applications/application1/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/backends/applications/application2/backend-config.json | 29
-rw-r--r--  tests/test_resources/backends/applications/application2/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/backends/applications/application3/readme.txt | 4
-rw-r--r--  tests/test_resources/backends/applications/application4/backend-config.json | 33
-rw-r--r--  tests/test_resources/backends/applications/application4/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/backends/applications/application5/backend-config.json | 134
-rw-r--r--  tests/test_resources/backends/applications/application5/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/backends/applications/application6/backend-config.json | 41
-rw-r--r--  tests/test_resources/backends/applications/application6/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/backends/applications/readme.txt | 4
-rw-r--r--  tests/test_resources/backends/systems/system1/backend-config.json | 24
-rw-r--r--  tests/test_resources/backends/systems/system1/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/backends/systems/system1/system_artifact/empty.txt | 2
-rw-r--r--  tests/test_resources/backends/systems/system2/backend-config.json | 24
-rw-r--r--  tests/test_resources/backends/systems/system2/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/backends/systems/system3/readme.txt | 4
-rw-r--r--  tests/test_resources/backends/systems/system4/backend-config.json | 15
-rw-r--r--  tests/test_resources/backends/systems/system4/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/backends/systems/system6/backend-config.json | 30
-rw-r--r--  tests/test_resources/backends/systems/system6/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/hello_world.json | 53
-rw-r--r--  tests/test_resources/hello_world.json.license | 3
-rwxr-xr-x  tests/test_resources/scripts/test_backend_run | 8
-rw-r--r--  tests/test_resources/scripts/test_backend_run_script.sh | 8
-rw-r--r--  tests/test_resources/various/applications/application_with_empty_config/backend-config.json | 1
-rw-r--r--  tests/test_resources/various/applications/application_with_empty_config/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/various/applications/application_with_valid_config/backend-config.json | 37
-rw-r--r--  tests/test_resources/various/applications/application_with_valid_config/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/various/applications/application_with_wrong_config1/backend-config.json | 2
-rw-r--r--  tests/test_resources/various/applications/application_with_wrong_config1/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/various/applications/application_with_wrong_config2/backend-config.json | 32
-rw-r--r--  tests/test_resources/various/applications/application_with_wrong_config2/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/various/applications/application_with_wrong_config3/backend-config.json | 37
-rw-r--r--  tests/test_resources/various/applications/application_with_wrong_config3/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/various/systems/system_with_empty_config/backend-config.json | 1
-rw-r--r--  tests/test_resources/various/systems/system_with_empty_config/backend-config.json.license | 3
-rw-r--r--  tests/test_resources/various/systems/system_with_valid_config/backend-config.json | 12
-rw-r--r--  tests/test_resources/various/systems/system_with_valid_config/backend-config.json.license | 3
-rw-r--r--  tests/test_target_registry.py | 36
-rw-r--r--  tests/test_utils_proc.py | 17
112 files changed, 1183 insertions, 6975 deletions
diff --git a/setup.cfg b/setup.cfg
index b70c955..bee1edf 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -35,7 +35,6 @@ install_requires =
numpy<1.24
requests~=2.28.1
rich~=12.6.0
- sh~=1.14.3
tomli~=2.0.1 ; python_version<"3.11"
[options.packages.find]
diff --git a/src/mlia/backend/corstone/__init__.py b/src/mlia/backend/corstone/__init__.py
index f89da63..36f74ee 100644
--- a/src/mlia/backend/corstone/__init__.py
+++ b/src/mlia/backend/corstone/__init__.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Corstone backend module."""
from mlia.backend.config import BackendConfiguration
@@ -23,3 +23,8 @@ registry.register(
backend_type=BackendType.CUSTOM,
),
)
+
+
+def is_corstone_backend(backend_name: str) -> bool:
+ """Check if backend belongs to Corstone."""
+ return backend_name in ["Corstone-300", "Corstone-310"]
diff --git a/src/mlia/backend/corstone/install.py b/src/mlia/backend/corstone/install.py
index 2a0e5c9..c57a47b 100644
--- a/src/mlia/backend/corstone/install.py
+++ b/src/mlia/backend/corstone/install.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Module for Corstone based FVPs.
@@ -12,9 +12,7 @@ import logging
import subprocess # nosec
from pathlib import Path
-from mlia.backend.executor.runner import BackendRunner
from mlia.backend.install import BackendInstallation
-from mlia.backend.install import BackendMetadata
from mlia.backend.install import CompoundPathChecker
from mlia.backend.install import Installation
from mlia.backend.install import PackagePathChecker
@@ -33,6 +31,7 @@ class Corstone300Installer:
"""Install Corstone-300 and return path to the models."""
with working_directory(dist_dir):
install_dir = "corstone-300"
+
try:
fvp_install_cmd = [
"./FVP_Corstone_SSE-300.sh",
@@ -62,23 +61,18 @@ class Corstone300Installer:
def get_corstone_300_installation() -> Installation:
"""Get Corstone-300 installation."""
corstone_300 = BackendInstallation(
- backend_runner=BackendRunner(),
# pylint: disable=line-too-long
- metadata=BackendMetadata(
- name="Corstone-300",
- description="Corstone-300 FVP",
- system_config="backend_configs/systems/corstone-300/backend-config.json",
- apps_resources=[],
- fvp_dir_name="corstone_300",
- download_artifact=DownloadArtifact(
- name="Corstone-300 FVP",
- url="https://developer.arm.com/-/media/Arm%20Developer%20Community/Downloads/OSS/FVP/Corstone-300/FVP_Corstone_SSE-300_11.16_26.tgz",
- filename="FVP_Corstone_SSE-300_11.16_26.tgz",
- version="11.16_26",
- sha256_hash="e26139be756b5003a30d978c629de638aed1934d597dc24a17043d4708e934d7",
- ),
- supported_platforms=["Linux"],
+ name="Corstone-300",
+ description="Corstone-300 FVP",
+ fvp_dir_name="corstone_300",
+ download_artifact=DownloadArtifact(
+ name="Corstone-300 FVP",
+ url="https://developer.arm.com/-/media/Arm%20Developer%20Community/Downloads/OSS/FVP/Corstone-300/FVP_Corstone_SSE-300_11.16_26.tgz",
+ filename="FVP_Corstone_SSE-300_11.16_26.tgz",
+ version="11.16_26",
+ sha256_hash="e26139be756b5003a30d978c629de638aed1934d597dc24a17043d4708e934d7",
),
+ supported_platforms=["Linux"],
# pylint: enable=line-too-long
path_checker=CompoundPathChecker(
PackagePathChecker(
@@ -87,6 +81,7 @@ def get_corstone_300_installation() -> Installation:
"models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U65",
],
backend_subfolder="models/Linux64_GCC-6.4",
+ settings={"profile": "default"},
),
StaticPathChecker(
static_backend_path=Path("/opt/VHT"),
@@ -95,9 +90,7 @@ def get_corstone_300_installation() -> Installation:
"VHT_Corstone_SSE-300_Ethos-U65",
],
copy_source=False,
- system_config=(
- "backend_configs/systems/corstone-300-vht/backend-config.json"
- ),
+ settings={"profile": "AVH"},
),
),
backend_installer=Corstone300Installer(),
@@ -109,18 +102,11 @@ def get_corstone_300_installation() -> Installation:
def get_corstone_310_installation() -> Installation:
"""Get Corstone-310 installation."""
corstone_310 = BackendInstallation(
- backend_runner=BackendRunner(),
- # pylint: disable=line-too-long
- metadata=BackendMetadata(
- name="Corstone-310",
- description="Corstone-310 FVP",
- system_config="backend_configs/systems/corstone-310/backend-config.json",
- apps_resources=[],
- fvp_dir_name="corstone_310",
- download_artifact=None,
- supported_platforms=["Linux"],
- ),
- # pylint: enable=line-too-long
+ name="Corstone-310",
+ description="Corstone-310 FVP",
+ fvp_dir_name="corstone_310",
+ download_artifact=None,
+ supported_platforms=["Linux"],
path_checker=CompoundPathChecker(
PackagePathChecker(
expected_files=[
@@ -128,6 +114,7 @@ def get_corstone_310_installation() -> Installation:
"models/Linux64_GCC-9.3/FVP_Corstone_SSE-310_Ethos-U65",
],
backend_subfolder="models/Linux64_GCC-9.3",
+ settings={"profile": "default"},
),
StaticPathChecker(
static_backend_path=Path("/opt/VHT"),
@@ -136,9 +123,7 @@ def get_corstone_310_installation() -> Installation:
"VHT_Corstone_SSE-310_Ethos-U65",
],
copy_source=False,
- system_config=(
- "backend_configs/systems/corstone-310-vht/backend-config.json"
- ),
+ settings={"profile": "AVH"},
),
),
backend_installer=None,
diff --git a/src/mlia/backend/corstone/performance.py b/src/mlia/backend/corstone/performance.py
index 531f0cd..8fd3e40 100644
--- a/src/mlia/backend/corstone/performance.py
+++ b/src/mlia/backend/corstone/performance.py
@@ -3,40 +3,25 @@
"""Module for backend integration."""
from __future__ import annotations
+import base64
+import json
import logging
-from abc import ABC
-from abc import abstractmethod
+import re
+import subprocess # nosec
from dataclasses import dataclass
from pathlib import Path
-from typing import Literal
-from mlia.backend.executor.output_consumer import Base64OutputConsumer
-from mlia.backend.executor.output_consumer import OutputConsumer
-from mlia.backend.executor.runner import BackendRunner
-from mlia.backend.executor.runner import ExecutionParams
-from mlia.backend.install import get_application_name
-from mlia.backend.install import get_system_name
+from mlia.backend.errors import BackendExecutionFailed
+from mlia.backend.repo import get_backend_repository
+from mlia.utils.filesystem import get_mlia_resources
+from mlia.utils.proc import Command
+from mlia.utils.proc import process_command_output
logger = logging.getLogger(__name__)
@dataclass
-class DeviceInfo:
- """Device information."""
-
- device_type: Literal["Ethos-U55", "Ethos-U65", "ethos-u55", "ethos-u65"]
- mac: int
-
-
-@dataclass
-class ModelInfo:
- """Model info."""
-
- model_path: Path
-
-
-@dataclass
class PerformanceMetrics:
"""Performance metrics parsed from generic inference output."""
@@ -48,186 +33,188 @@ class PerformanceMetrics:
npu_axi1_rd_data_beat_received: int
-class LogWriter(OutputConsumer):
- """Redirect output to the logger."""
+class GenericInferenceOutputParser:
+ """Generic inference runner output parser."""
- def feed(self, line: str) -> bool:
- """Process line from the output."""
- logger.debug(line.strip())
- return False
+ pattern = re.compile(r"<metrics>(.*)</metrics>")
+ def __init__(self) -> None:
+ """Init parser."""
+ self.base64_data: list[str] = []
-class GenericInferenceOutputParser(Base64OutputConsumer):
- """Generic inference app output parser."""
+ def __call__(self, line: str) -> None:
+ """Extract base64 strings from the app output."""
+ if res_b64 := self.pattern.search(line):
+ self.base64_data.append(res_b64.group(1))
- def __init__(self) -> None:
- """Init generic inference output parser instance."""
- super().__init__()
- self._map = {
- "NPU ACTIVE": "npu_active_cycles",
- "NPU IDLE": "npu_idle_cycles",
- "NPU TOTAL": "npu_total_cycles",
- "NPU AXI0_RD_DATA_BEAT_RECEIVED": "npu_axi0_rd_data_beat_received",
- "NPU AXI0_WR_DATA_BEAT_WRITTEN": "npu_axi0_wr_data_beat_written",
- "NPU AXI1_RD_DATA_BEAT_RECEIVED": "npu_axi1_rd_data_beat_received",
- }
-
- @property
- def result(self) -> dict:
- """Merge the raw results and map the names to the right output names."""
- merged_result = {}
- for raw_result in self.parsed_output:
- for profiling_result in raw_result:
- for sample in profiling_result["samples"]:
- name, values = (sample["name"], sample["value"])
- if name in merged_result:
- raise KeyError(
- f"Duplicate key '{name}' in base64 output.",
- )
- new_name = self._map[name]
- merged_result[new_name] = values[0]
- return merged_result
-
- def is_ready(self) -> bool:
- """Return true if all expected data has been parsed."""
- return set(self.result.keys()) == set(self._map.values())
-
- def missed_keys(self) -> set[str]:
- """Return a set of the keys that have not been found in the output."""
- return set(self._map.values()) - set(self.result.keys())
-
-
-class GenericInferenceRunner(ABC):
- """Abstract class for generic inference runner."""
-
- def __init__(self, backend_runner: BackendRunner):
- """Init generic inference runner instance."""
- self.backend_runner = backend_runner
-
- def run(
- self, model_info: ModelInfo, output_consumers: list[OutputConsumer]
- ) -> None:
- """Run generic inference for the provided device/model."""
- execution_params = self.get_execution_params(model_info)
-
- ctx = self.backend_runner.run_application(execution_params)
- if ctx.stdout is not None:
- ctx.stdout = self.consume_output(ctx.stdout, output_consumers)
-
- @abstractmethod
- def get_execution_params(self, model_info: ModelInfo) -> ExecutionParams:
- """Get execution params for the provided model."""
-
- def check_system_and_application(self, system_name: str, app_name: str) -> None:
- """Check if requested system and application installed."""
- if not self.backend_runner.is_system_installed(system_name):
- raise Exception(f"System {system_name} is not installed")
-
- if not self.backend_runner.is_application_installed(app_name, system_name):
- raise Exception(
- f"Application {app_name} for the system {system_name} "
- "is not installed"
+ def get_metrics(self) -> PerformanceMetrics:
+ """Parse the collected data and return perf metrics."""
+ try:
+ parsed_metrics = self._parse_data()
+
+ return PerformanceMetrics(
+ parsed_metrics["NPU ACTIVE"],
+ parsed_metrics["NPU IDLE"],
+ parsed_metrics["NPU TOTAL"],
+ parsed_metrics["NPU AXI0_RD_DATA_BEAT_RECEIVED"],
+ parsed_metrics["NPU AXI0_WR_DATA_BEAT_WRITTEN"],
+ parsed_metrics["NPU AXI1_RD_DATA_BEAT_RECEIVED"],
)
+ except Exception as err:
+ raise ValueError("Unable to parse output and get metrics.") from err
- @staticmethod
- def consume_output(output: bytearray, consumers: list[OutputConsumer]) -> bytearray:
- """
- Pass program's output to the consumers and filter it.
-
- Returns the filtered output.
- """
- filtered_output = bytearray()
- for line_bytes in output.splitlines():
- line = line_bytes.decode("utf-8")
- remove_line = False
- for consumer in consumers:
- if consumer.feed(line):
- remove_line = True
- if not remove_line:
- filtered_output.extend(line_bytes)
-
- return filtered_output
-
-
-class GenericInferenceRunnerEthosU(GenericInferenceRunner):
- """Generic inference runner on U55/65."""
-
- def __init__(
- self, backend_runner: BackendRunner, device_info: DeviceInfo, backend: str
- ) -> None:
- """Init generic inference runner instance."""
- super().__init__(backend_runner)
-
- system_name, app_name = self.resolve_system_and_app(device_info, backend)
- self.system_name = system_name
- self.app_name = app_name
- self.device_info = device_info
-
- @staticmethod
- def resolve_system_and_app(
- device_info: DeviceInfo, backend: str
- ) -> tuple[str, str]:
- """Find appropriate system and application for the provided device/backend."""
- try:
- system_name = get_system_name(backend, device_info.device_type)
- except KeyError as ex:
- raise RuntimeError(
- f"Unsupported device {device_info.device_type} "
- f"for backend {backend}"
- ) from ex
+ def _parse_data(self) -> dict[str, int]:
+ """Parse the data."""
+ parsed_metrics: dict[str, int] = {}
- try:
- app_name = get_application_name(system_name)
- except KeyError as err:
- raise RuntimeError(f"System {system_name} is not installed") from err
-
- return system_name, app_name
-
- def get_execution_params(self, model_info: ModelInfo) -> ExecutionParams:
- """Get execution params for Ethos-U55/65."""
- self.check_system_and_application(self.system_name, self.app_name)
-
- system_params = [
- f"mac={self.device_info.mac}",
- f"input_file={model_info.model_path.absolute()}",
- ]
-
- return ExecutionParams(
- self.app_name,
- self.system_name,
- [],
- system_params,
- )
+ for base64_item in self.base64_data:
+ res_json = base64.b64decode(base64_item, validate=True)
+ for profiling_group in json.loads(res_json):
+ for metric in profiling_group["samples"]:
+ metric_name = metric["name"]
+ metric_value = int(metric["value"][0])
-def get_generic_runner(device_info: DeviceInfo, backend: str) -> GenericInferenceRunner:
- """Get generic runner for provided device and backend."""
- backend_runner = get_backend_runner()
- return GenericInferenceRunnerEthosU(backend_runner, device_info, backend)
+ if metric_name in parsed_metrics:
+ raise KeyError(f"Duplicate key {metric_name}")
+ parsed_metrics[metric_name] = metric_value
-def estimate_performance(
- model_info: ModelInfo, device_info: DeviceInfo, backend: str
+ return parsed_metrics
+
+
+@dataclass
+class FVPMetadata:
+ """Metadata for FVP."""
+
+ executable: str
+ generic_inf_app: Path
+
+
+def get_generic_inference_app_path(fvp: str, target: str) -> Path:
+ """Return path to the generic inference runner binary."""
+ apps_path = get_mlia_resources() / "backends/applications"
+
+ fvp_mapping = {"Corstone-300": "300", "Corstone-310": "310"}
+ target_mapping = {"ethos-u55": "U55", "ethos-u65": "U65"}
+
+ fvp_version = f"sse-{fvp_mapping[fvp]}"
+ app_version = f"22.08.02-ethos-{target_mapping[target]}-Default-noTA"
+
+ app_dir = f"inference_runner-{fvp_version}-{app_version}"
+ return apps_path.joinpath(app_dir, "ethos-u-inference_runner.axf")
+
+
+def get_executable_name(fvp: str, profile: str, target: str) -> str:
+ """Return name of the executable for selected FVP and profile."""
+ executable_name_mapping = {
+ ("Corstone-300", "AVH", "ethos-u55"): "VHT_Corstone_SSE-300_Ethos-U55",
+ ("Corstone-300", "AVH", "ethos-u65"): "VHT_Corstone_SSE-300_Ethos-U65",
+ ("Corstone-300", "default", "ethos-u55"): "FVP_Corstone_SSE-300_Ethos-U55",
+ ("Corstone-300", "default", "ethos-u65"): "FVP_Corstone_SSE-300_Ethos-U65",
+ ("Corstone-310", "AVH", "ethos-u55"): "VHT_Corstone_SSE-310",
+ ("Corstone-310", "AVH", "ethos-u65"): "VHT_Corstone_SSE-310_Ethos-U65",
+ }
+
+ return executable_name_mapping[(fvp, profile, target)]
+
+
+def get_fvp_metadata(fvp: str, profile: str, target: str) -> FVPMetadata:
+ """Return metadata for selected Corstone backend."""
+ executable_name = get_executable_name(fvp, profile, target)
+ app = get_generic_inference_app_path(fvp, target)
+
+ return FVPMetadata(executable_name, app)
+
+
+def build_corstone_command(
+ backend_path: Path,
+ fvp: str,
+ target: str,
+ mac: int,
+ model: Path,
+ profile: str,
+) -> Command:
+ """Build command to run Corstone FVP."""
+ fvp_metadata = get_fvp_metadata(fvp, profile, target)
+
+ cmd = [
+ backend_path.joinpath(fvp_metadata.executable).as_posix(),
+ "-a",
+ fvp_metadata.generic_inf_app.as_posix(),
+ "--data",
+ f"{model}@0x90000000",
+ "-C",
+ f"ethosu.num_macs={mac}",
+ "-C",
+ "mps3_board.telnetterminal0.start_telnet=0",
+ "-C",
+ "mps3_board.uart0.out_file='-'",
+ "-C",
+ "mps3_board.uart0.shutdown_on_eot=1",
+ "-C",
+ "mps3_board.visualisation.disable-visualisation=1",
+ "--stat",
+ ]
+
+ return Command(cmd)
+
+
+def get_metrics(
+ backend_path: Path,
+ fvp: str,
+ target: str,
+ mac: int,
+ model: Path,
+ profile: str = "default",
) -> PerformanceMetrics:
- """Get performance estimations."""
- output_parser = GenericInferenceOutputParser()
- output_consumers = [output_parser, LogWriter()]
+ """Run generic inference and return perf metrics."""
+ try:
+ command = build_corstone_command(
+ backend_path,
+ fvp,
+ target,
+ mac,
+ model,
+ profile,
+ )
+ except Exception as err:
+ raise BackendExecutionFailed(
+ f"Unable to construct a command line for {fvp}"
+ ) from err
- generic_runner = get_generic_runner(device_info, backend)
- generic_runner.run(model_info, output_consumers)
+ output_parser = GenericInferenceOutputParser()
- if not output_parser.is_ready():
- missed_data = ",".join(output_parser.missed_keys())
- logger.debug("Unable to get performance metrics, missed data %s", missed_data)
- raise Exception("Unable to get performance metrics, insufficient data")
+ def redirect_to_log(line: str) -> None:
+ """Redirect FVP output to the logger."""
+ logger.debug(line.strip())
- return PerformanceMetrics(**output_parser.result)
+ try:
+ process_command_output(
+ command,
+ [output_parser, redirect_to_log],
+ )
+ except subprocess.CalledProcessError as err:
+ raise BackendExecutionFailed("Backend execution failed.") from err
+ return output_parser.get_metrics()
-def get_backend_runner() -> BackendRunner:
- """
- Return BackendRunner instance.
- Note: This is needed for the unit tests.
- """
- return BackendRunner()
+def estimate_performance(
+ target: str, mac: int, model: Path, backend: str
+) -> PerformanceMetrics:
+ """Get performance estimations."""
+ backend_repo = get_backend_repository()
+ backend_path, settings = backend_repo.get_backend_settings(backend)
+
+ if not settings or "profile" not in settings:
+ raise BackendExecutionFailed(f"Unable to configure backend {backend}.")
+
+ return get_metrics(
+ backend_path,
+ backend,
+ target,
+ mac,
+ model,
+ settings["profile"],
+ )
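A minimal usage sketch for the reworked performance flow above, assuming the Corstone-300 backend has been installed and registered in the backend repository. estimate_performance() and the PerformanceMetrics fields are taken from the new performance.py; the model path and MAC configuration are illustrative only.

    from pathlib import Path

    from mlia.backend.corstone.performance import estimate_performance

    # Runs the generic inference application on the FVP and parses the
    # base64-encoded <metrics> block emitted on its standard output.
    metrics = estimate_performance(
        target="ethos-u55",
        mac=256,
        model=Path("model.tflite"),  # illustrative model path
        backend="Corstone-300",
    )
    print(metrics.npu_active_cycles, metrics.npu_total_cycles)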
diff --git a/src/mlia/backend/errors.py b/src/mlia/backend/errors.py
index bd5da95..cf0ffad 100644
--- a/src/mlia/backend/errors.py
+++ b/src/mlia/backend/errors.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Backend errors."""
@@ -10,3 +10,7 @@ class BackendUnavailableError(Exception):
"""Init error."""
super().__init__(msg)
self.backend = backend
+
+
+class BackendExecutionFailed(Exception):
+ """Backend execution failed."""
diff --git a/src/mlia/backend/executor/__init__.py b/src/mlia/backend/executor/__init__.py
deleted file mode 100644
index 3d60372..0000000
--- a/src/mlia/backend/executor/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Backend module."""
diff --git a/src/mlia/backend/executor/application.py b/src/mlia/backend/executor/application.py
deleted file mode 100644
index 738ac4e..0000000
--- a/src/mlia/backend/executor/application.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Application backend module."""
-from __future__ import annotations
-
-import re
-from pathlib import Path
-from typing import Any
-from typing import cast
-from typing import List
-
-from mlia.backend.executor.common import Backend
-from mlia.backend.executor.common import ConfigurationException
-from mlia.backend.executor.common import get_backend_configs
-from mlia.backend.executor.common import get_backend_directories
-from mlia.backend.executor.common import load_application_configs
-from mlia.backend.executor.common import load_config
-from mlia.backend.executor.common import remove_backend
-from mlia.backend.executor.config import ApplicationConfig
-from mlia.backend.executor.config import ExtendedApplicationConfig
-from mlia.backend.executor.fs import get_backends_path
-from mlia.backend.executor.source import create_destination_and_install
-from mlia.backend.executor.source import get_source
-
-
-def get_available_application_directory_names() -> list[str]:
- """Return a list of directory names for all available applications."""
- return [entry.name for entry in get_backend_directories("applications")]
-
-
-def get_available_applications() -> list[Application]:
- """Return a list with all available applications."""
- available_applications = []
- for config_json in get_backend_configs("applications"):
- config_entries = cast(List[ExtendedApplicationConfig], load_config(config_json))
- for config_entry in config_entries:
- config_entry["config_location"] = config_json.parent.absolute()
- applications = load_applications(config_entry)
- available_applications += applications
-
- return sorted(available_applications, key=lambda application: application.name)
-
-
-def get_application(
- application_name: str, system_name: str | None = None
-) -> list[Application]:
- """Return a list of application instances with provided name."""
- return [
- application
- for application in get_available_applications()
- if application.name == application_name
- and (not system_name or application.can_run_on(system_name))
- ]
-
-
-def install_application(source_path: Path) -> None:
- """Install application."""
- try:
- source = get_source(source_path)
- config = cast(List[ExtendedApplicationConfig], source.config())
- applications_to_install = [
- s for entry in config for s in load_applications(entry)
- ]
- except Exception as error:
- raise ConfigurationException("Unable to read application definition") from error
-
- if not applications_to_install:
- raise ConfigurationException("No application definition found")
-
- available_applications = get_available_applications()
- already_installed = [
- s for s in applications_to_install if s in available_applications
- ]
- if already_installed:
- names = {application.name for application in already_installed}
- raise ConfigurationException(
- f"Applications [{','.join(names)}] are already installed."
- )
-
- create_destination_and_install(source, get_backends_path("applications"))
-
-
-def remove_application(directory_name: str) -> None:
- """Remove application directory."""
- remove_backend(directory_name, "applications")
-
-
-def get_unique_application_names(system_name: str | None = None) -> list[str]:
- """Extract a list of unique application names of all application available."""
- return list(
- {
- application.name
- for application in get_available_applications()
- if not system_name or application.can_run_on(system_name)
- }
- )
-
-
-class Application(Backend):
- """Class for representing a single application component."""
-
- def __init__(self, config: ApplicationConfig) -> None:
- """Construct a Application instance from a dict."""
- super().__init__(config)
-
- self.supported_systems = config.get("supported_systems", [])
-
- def __eq__(self, other: object) -> bool:
- """Overload operator ==."""
- if not isinstance(other, Application):
- return False
-
- return (
- super().__eq__(other)
- and self.name == other.name
- and set(self.supported_systems) == set(other.supported_systems)
- )
-
- def can_run_on(self, system_name: str) -> bool:
- """Check if the application can run on the system passed as argument."""
- return system_name in self.supported_systems
-
- def get_details(self) -> dict[str, Any]:
- """Return dictionary with information about the Application instance."""
- output = {
- "type": "application",
- "name": self.name,
- "description": self.description,
- "supported_systems": self.supported_systems,
- "commands": self._get_command_details(),
- }
-
- return output
-
- def remove_unused_params(self) -> None:
- """Remove unused params in commands.
-
- After merging default and system related configuration application
- could have parameters that are not being used in commands. They
- should be removed.
- """
- for command in self.commands.values():
- indexes_or_aliases = [
- m
- for cmd_str in command.command_strings
- for m in re.findall(r"{user_params:(?P<index_or_alias>\w+)}", cmd_str)
- ]
-
- only_aliases = all(not item.isnumeric() for item in indexes_or_aliases)
- if only_aliases:
- used_params = [
- param
- for param in command.params
- if param.alias in indexes_or_aliases
- ]
- command.params = used_params
-
-
-def load_applications(config: ExtendedApplicationConfig) -> list[Application]:
- """Load application.
-
- Application configuration could contain different parameters/commands for different
- supported systems. For each supported system this function will return separate
- Application instance with appropriate configuration.
- """
- configs = load_application_configs(config, ApplicationConfig)
- applications = [Application(cfg) for cfg in configs]
- for application in applications:
- application.remove_unused_params()
- return applications
diff --git a/src/mlia/backend/executor/common.py b/src/mlia/backend/executor/common.py
deleted file mode 100644
index 48dbd4a..0000000
--- a/src/mlia/backend/executor/common.py
+++ /dev/null
@@ -1,517 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Contain all common functions for the backends."""
-from __future__ import annotations
-
-import json
-import logging
-import re
-from abc import ABC
-from collections import Counter
-from pathlib import Path
-from typing import Any
-from typing import Callable
-from typing import cast
-from typing import Final
-from typing import IO
-from typing import Iterable
-from typing import Match
-from typing import NamedTuple
-from typing import Pattern
-
-from mlia.backend.executor.config import BackendConfig
-from mlia.backend.executor.config import BaseBackendConfig
-from mlia.backend.executor.config import NamedExecutionConfig
-from mlia.backend.executor.config import UserParamConfig
-from mlia.backend.executor.config import UserParamsConfig
-from mlia.backend.executor.fs import get_backends_path
-from mlia.backend.executor.fs import remove_resource
-from mlia.backend.executor.fs import ResourceType
-
-
-BACKEND_CONFIG_FILE: Final[str] = "backend-config.json"
-
-
-class ConfigurationException(Exception):
- """Configuration exception."""
-
-
-def get_backend_config(dir_path: Path) -> Path:
- """Get path to backendir configuration file."""
- return dir_path / BACKEND_CONFIG_FILE
-
-
-def get_backend_configs(resource_type: ResourceType) -> Iterable[Path]:
- """Get path to the backend configs for provided resource_type."""
- return (
- get_backend_config(entry) for entry in get_backend_directories(resource_type)
- )
-
-
-def get_backend_directories(resource_type: ResourceType) -> Iterable[Path]:
- """Get path to the backend directories for provided resource_type."""
- return (
- entry
- for entry in get_backends_path(resource_type).iterdir()
- if is_backend_directory(entry)
- )
-
-
-def is_backend_directory(dir_path: Path) -> bool:
- """Check if path is backend's configuration directory."""
- return dir_path.is_dir() and get_backend_config(dir_path).is_file()
-
-
-def remove_backend(directory_name: str, resource_type: ResourceType) -> None:
- """Remove backend with provided type and directory_name."""
- if not directory_name:
- raise Exception("No directory name provided")
-
- remove_resource(directory_name, resource_type)
-
-
-def load_config(config: Path | IO[bytes] | None) -> BackendConfig:
- """Return a loaded json file."""
- if config is None:
- raise Exception("Unable to read config")
-
- if isinstance(config, Path):
- with config.open() as json_file:
- return cast(BackendConfig, json.load(json_file))
-
- return cast(BackendConfig, json.load(config))
-
-
-def parse_raw_parameter(parameter: str) -> tuple[str, str | None]:
- """Split the parameter string in name and optional value.
-
- It manages the following cases:
- --param=1 -> --param, 1
- --param 1 -> --param, 1
- --flag -> --flag, None
- """
- data = re.split(" |=", parameter)
- if len(data) == 1:
- param_name = data[0]
- param_value = None
- else:
- param_name = " ".join(data[0:-1])
- param_value = data[-1]
- return param_name, param_value
-
-
-class DataPaths(NamedTuple):
- """DataPaths class."""
-
- src: Path
- dst: str
-
-
-class Backend(ABC):
- """Backend class."""
-
- # pylint: disable=too-many-instance-attributes
-
- def __init__(self, config: BaseBackendConfig):
- """Initialize backend."""
- name = config.get("name")
- if not name:
- raise ConfigurationException("Name is empty")
-
- self.name = name
- self.description = config.get("description", "")
- self.config_location = config.get("config_location")
- self.variables = config.get("variables", {})
- self.annotations = config.get("annotations", {})
-
- self._parse_commands_and_params(config)
-
- def validate_parameter(self, command_name: str, parameter: str) -> bool:
- """Validate the parameter string against the application configuration.
-
- We take the parameter string, extract the parameter name/value and
- check them against the current configuration.
- """
- param_name, param_value = parse_raw_parameter(parameter)
- valid_param_name = valid_param_value = False
-
- command = self.commands.get(command_name)
- if not command:
- raise AttributeError(f"Unknown command: '{command_name}'")
-
- # Iterate over all available parameters until we have a match.
- for param in command.params:
- if self._same_parameter(param_name, param):
- valid_param_name = True
- # This is a non-empty list
- if param.values:
- # We check if the value is allowed in the configuration
- valid_param_value = param_value in param.values
- else:
- # In this case we don't validate the value and accept
- # whatever we have set.
- valid_param_value = True
- break
-
- return valid_param_name and valid_param_value
-
- def __eq__(self, other: object) -> bool:
- """Overload operator ==."""
- if not isinstance(other, Backend):
- return False
-
- return (
- self.name == other.name
- and self.description == other.description
- and self.commands == other.commands
- )
-
- def __repr__(self) -> str:
- """Represent the Backend instance by its name."""
- return self.name
-
- def _parse_commands_and_params(self, config: BaseBackendConfig) -> None:
- """Parse commands and user parameters."""
- self.commands: dict[str, Command] = {}
-
- commands = config.get("commands")
- if commands:
- params = config.get("user_params")
-
- for command_name in commands.keys():
- command_params = self._parse_params(params, command_name)
- command_strings = [
- self._substitute_variables(cmd)
- for cmd in commands.get(command_name, [])
- ]
- self.commands[command_name] = Command(command_strings, command_params)
-
- def _substitute_variables(self, str_val: str) -> str:
- """Substitute variables in string.
-
- Variables is being substituted at backend's creation stage because
- they could contain references to other params which will be
- resolved later.
- """
- if not str_val:
- return str_val
-
- var_pattern: Final[Pattern] = re.compile(r"{variables:(?P<var_name>\w+)}")
-
- def var_value(match: Match) -> str:
- var_name = match["var_name"]
- if var_name not in self.variables:
- raise ConfigurationException(f"Unknown variable {var_name}")
-
- return self.variables[var_name]
-
- return var_pattern.sub(var_value, str_val)
-
- @classmethod
- def _parse_params(
- cls, params: UserParamsConfig | None, command: str
- ) -> list[Param]:
- if not params:
- return []
-
- return [cls._parse_param(p) for p in params.get(command, [])]
-
- @classmethod
- def _parse_param(cls, param: UserParamConfig) -> Param:
- """Parse a single parameter."""
- name = param.get("name")
- if name is not None and not name:
- raise ConfigurationException("Parameter has an empty 'name' attribute.")
- values = param.get("values", None)
- default_value = param.get("default_value", None)
- description = param.get("description", "")
- alias = param.get("alias")
-
- return Param(
- name=name,
- description=description,
- values=values,
- default_value=default_value,
- alias=alias,
- )
-
- def _get_command_details(self) -> dict:
- command_details = {
- command_name: command.get_details()
- for command_name, command in self.commands.items()
- }
- return command_details
-
- def _get_user_param_value(self, user_params: list[str], param: Param) -> str | None:
- """Get the user-specified value of a parameter."""
- for user_param in user_params:
- user_param_name, user_param_value = parse_raw_parameter(user_param)
- if user_param_name == param.name:
- warn_message = (
- "The direct use of parameter name is deprecated"
- " and might be removed in the future.\n"
- f"Please use alias '{param.alias}' instead of "
- "'{user_param_name}' to provide the parameter."
- )
- logging.warning(warn_message)
-
- if self._same_parameter(user_param_name, param):
- return user_param_value
-
- return None
-
- @staticmethod
- def _same_parameter(user_param_name_or_alias: str, param: Param) -> bool:
- """Compare user parameter name with param name or alias."""
- # Strip the "=" sign in the param_name. This is needed just for
- # comparison with the parameters passed by the user.
- # The equal sign needs to be honoured when re-building the
- # parameter back.
- param_name = None if not param.name else param.name.rstrip("=")
- return user_param_name_or_alias in [param_name, param.alias]
-
- def resolved_parameters(
- self, command_name: str, user_params: list[str]
- ) -> list[tuple[str | None, Param]]:
- """Return list of parameters with values."""
- result: list[tuple[str | None, Param]] = []
- command = self.commands.get(command_name)
- if not command:
- return result
-
- for param in command.params:
- value = self._get_user_param_value(user_params, param)
- if not value:
- value = param.default_value
- result.append((value, param))
-
- return result
-
- def build_command(
- self,
- command_name: str,
- user_params: list[str],
- param_resolver: Callable[[str, str, list[tuple[str | None, Param]]], str],
- ) -> list[str]:
- """
- Return a list of executable command strings.
-
- Given a command and associated parameters, returns a list of executable command
- strings.
- """
- command = self.commands.get(command_name)
- if not command:
- raise ConfigurationException(
- f"Command '{command_name}' could not be found."
- )
-
- commands_to_run = []
-
- params_values = self.resolved_parameters(command_name, user_params)
- for cmd_str in command.command_strings:
- cmd_str = resolve_all_parameters(
- cmd_str, param_resolver, command_name, params_values
- )
- commands_to_run.append(cmd_str)
-
- return commands_to_run
-
-
-class Param:
- """Class for representing a generic application parameter."""
-
- def __init__( # pylint: disable=too-many-arguments
- self,
- name: str | None,
- description: str,
- values: list[str] | None = None,
- default_value: str | None = None,
- alias: str | None = None,
- ) -> None:
- """Construct a Param instance."""
- if not name and not alias:
- raise ConfigurationException(
- "Either name, alias or both must be set to identify a parameter."
- )
- self.name = name
- self.values = values
- self.description = description
- self.default_value = default_value
- self.alias = alias
-
- def get_details(self) -> dict:
- """Return a dictionary with all relevant information of a Param."""
- return {key: value for key, value in self.__dict__.items() if value}
-
- def __eq__(self, other: object) -> bool:
- """Overload operator ==."""
- if not isinstance(other, Param):
- return False
-
- return (
- self.name == other.name
- and self.values == other.values
- and self.default_value == other.default_value
- and self.description == other.description
- )
-
-
-class Command:
- """Class for representing a command."""
-
- def __init__(
- self, command_strings: list[str], params: list[Param] | None = None
- ) -> None:
- """Construct a Command instance."""
- self.command_strings = command_strings
-
- if params:
- self.params = params
- else:
- self.params = []
-
- self._validate()
-
- def _validate(self) -> None:
- """Validate command."""
- if not self.params:
- return
-
- aliases = [param.alias for param in self.params if param.alias is not None]
- repeated_aliases = [
- alias for alias, count in Counter(aliases).items() if count > 1
- ]
-
- if repeated_aliases:
- raise ConfigurationException(
- f"Non-unique aliases {', '.join(repeated_aliases)}"
- )
-
- both_name_and_alias = [
- param.name
- for param in self.params
- if param.name in aliases and param.name != param.alias
- ]
- if both_name_and_alias:
- raise ConfigurationException(
- f"Aliases {', '.join(both_name_and_alias)} could not be used "
- "as parameter name."
- )
-
- def get_details(self) -> dict:
- """Return a dictionary with all relevant information of a Command."""
- output = {
- "command_strings": self.command_strings,
- "user_params": [param.get_details() for param in self.params],
- }
- return output
-
- def __eq__(self, other: object) -> bool:
- """Overload operator ==."""
- if not isinstance(other, Command):
- return False
-
- return (
- self.command_strings == other.command_strings
- and self.params == other.params
- )
-
-
-def resolve_all_parameters(
- str_val: str,
- param_resolver: Callable[[str, str, list[tuple[str | None, Param]]], str],
- command_name: str | None = None,
- params_values: list[tuple[str | None, Param]] | None = None,
-) -> str:
- """Resolve all parameters in the string."""
- if not str_val:
- return str_val
-
- param_pattern: Final[Pattern] = re.compile(r"{(?P<param_name>[\w.:]+)}")
- while param_pattern.findall(str_val):
- str_val = param_pattern.sub(
- lambda m: param_resolver(
- m["param_name"], command_name or "", params_values or []
- ),
- str_val,
- )
- return str_val
-
-
-def load_application_configs(
- config: Any,
- config_type: type[Any],
- is_system_required: bool = True,
-) -> Any:
- """Get one config for each system supported by the application.
-
- The configuration could contain different parameters/commands for different
- supported systems. For each supported system this function will return separate
- config with appropriate configuration.
- """
- merged_configs = []
- supported_systems: list[NamedExecutionConfig] | None = config.get(
- "supported_systems"
- )
- if not supported_systems:
- if is_system_required:
- raise ConfigurationException("No supported systems definition provided")
- # Create an empty system to be used in the parsing below
- supported_systems = [cast(NamedExecutionConfig, {})]
-
- default_user_params = config.get("user_params", {})
-
- def merge_config(system: NamedExecutionConfig) -> Any:
- system_name = system.get("name")
- if not system_name and is_system_required:
- raise ConfigurationException(
- "Unable to read supported system definition, name is missed"
- )
-
- merged_config = config_type(**config)
- merged_config["supported_systems"] = [system_name] if system_name else []
- # merge default configuration and specific to the system
- merged_config["commands"] = {
- **config.get("commands", {}),
- **system.get("commands", {}),
- }
-
- params = {}
- tool_user_params = system.get("user_params", {})
- command_names = tool_user_params.keys() | default_user_params.keys()
- for command_name in command_names:
- if command_name not in merged_config["commands"]:
- continue
-
- params_default = default_user_params.get(command_name, [])
- params_tool = tool_user_params.get(command_name, [])
- if not params_default or not params_tool:
- params[command_name] = params_tool or params_default
- if params_default and params_tool:
- if any(not p.get("alias") for p in params_default):
- raise ConfigurationException(
- f"Default parameters for command {command_name} "
- "should have aliases"
- )
- if any(not p.get("alias") for p in params_tool):
- raise ConfigurationException(
- f"{system_name} parameters for command {command_name} "
- "should have aliases."
- )
-
- merged_by_alias = {
- **{p.get("alias"): p for p in params_default},
- **{p.get("alias"): p for p in params_tool},
- }
- params[command_name] = list(merged_by_alias.values())
-
- merged_config["user_params"] = params
- merged_config["variables"] = {
- **config.get("variables", {}),
- **system.get("variables", {}),
- }
- return merged_config
-
- merged_configs = [merge_config(system) for system in supported_systems]
-
- return merged_configs
diff --git a/src/mlia/backend/executor/config.py b/src/mlia/backend/executor/config.py
deleted file mode 100644
index dca53da..0000000
--- a/src/mlia/backend/executor/config.py
+++ /dev/null
@@ -1,68 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Contain definition of backend configuration."""
-from __future__ import annotations
-
-from pathlib import Path
-from typing import Dict
-from typing import List
-from typing import TypedDict
-from typing import Union
-
-
-class UserParamConfig(TypedDict, total=False):
- """User parameter configuration."""
-
- name: str | None
- default_value: str
- values: list[str]
- description: str
- alias: str
-
-
-UserParamsConfig = Dict[str, List[UserParamConfig]]
-
-
-class ExecutionConfig(TypedDict, total=False):
- """Execution configuration."""
-
- commands: dict[str, list[str]]
- user_params: UserParamsConfig
- variables: dict[str, str]
-
-
-class NamedExecutionConfig(ExecutionConfig):
- """Execution configuration with name."""
-
- name: str
-
-
-class BaseBackendConfig(ExecutionConfig, total=False):
- """Base backend configuration."""
-
- name: str
- description: str
- config_location: Path
- annotations: dict[str, str | list[str]]
-
-
-class ApplicationConfig(BaseBackendConfig, total=False):
- """Application configuration."""
-
- supported_systems: list[str]
-
-
-class ExtendedApplicationConfig(BaseBackendConfig, total=False):
- """Extended application configuration."""
-
- supported_systems: list[NamedExecutionConfig]
-
-
-class SystemConfig(BaseBackendConfig, total=False):
- """System configuration."""
-
- reporting: dict[str, dict]
-
-
-BackendItemConfig = Union[ApplicationConfig, SystemConfig]
-BackendConfig = Union[List[ExtendedApplicationConfig], List[SystemConfig]]
diff --git a/src/mlia/backend/executor/execution.py b/src/mlia/backend/executor/execution.py
deleted file mode 100644
index e253b16..0000000
--- a/src/mlia/backend/executor/execution.py
+++ /dev/null
@@ -1,342 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Application execution module."""
-from __future__ import annotations
-
-import logging
-import re
-from typing import cast
-
-from mlia.backend.executor.application import Application
-from mlia.backend.executor.application import get_application
-from mlia.backend.executor.common import Backend
-from mlia.backend.executor.common import ConfigurationException
-from mlia.backend.executor.common import Param
-from mlia.backend.executor.system import get_system
-from mlia.backend.executor.system import System
-
-logger = logging.getLogger(__name__)
-
-
-class AnotherInstanceIsRunningException(Exception):
- """Concurrent execution error."""
-
-
-class ExecutionContext: # pylint: disable=too-few-public-methods
- """Command execution context."""
-
- def __init__(
- self,
- app: Application,
- app_params: list[str],
- system: System,
- system_params: list[str],
- ):
- """Init execution context."""
- self.app = app
- self.app_params = app_params
- self.system = system
- self.system_params = system_params
-
- self.param_resolver = ParamResolver(self)
-
- self.stdout: bytearray | None = None
- self.stderr: bytearray | None = None
-
-
-class ParamResolver:
- """Parameter resolver."""
-
- def __init__(self, context: ExecutionContext):
- """Init parameter resolver."""
- self.ctx = context
-
- @staticmethod
- def resolve_user_params(
- cmd_name: str | None,
- index_or_alias: str,
- resolved_params: list[tuple[str | None, Param]] | None,
- ) -> str:
- """Resolve user params."""
- if not cmd_name or resolved_params is None:
- raise ConfigurationException("Unable to resolve user params")
-
- param_value: str | None = None
- param: Param | None = None
-
- if index_or_alias.isnumeric():
- i = int(index_or_alias)
- if i not in range(len(resolved_params)):
- raise ConfigurationException(
- f"Invalid index {i} for user params of command {cmd_name}"
- )
- param_value, param = resolved_params[i]
- else:
- for val, par in resolved_params:
- if par.alias == index_or_alias:
- param_value, param = val, par
- break
-
- if param is None:
- raise ConfigurationException(
- f"No user parameter for command '{cmd_name}' with "
- f"alias '{index_or_alias}'."
- )
-
- if param_value:
- # We need to handle to cases of parameters here:
- # 1) Optional parameters (non-positional with a name and value)
- # 2) Positional parameters (value only, no name needed)
- # Default to empty strings for positional arguments
- param_name = ""
- separator = ""
- if param.name is not None:
- # A valid param name means we have an optional/non-positional argument:
- # The separator is an empty string in case the param_name
- # has an equal sign as we have to honour it.
- # If the parameter doesn't end with an equal sign then a
- # space character is injected to split the parameter name
- # and its value
- param_name = param.name
- separator = "" if param.name.endswith("=") else " "
-
- return f"{param_name}{separator}{param_value}"
-
- if param.name is None:
- raise ConfigurationException(
- f"Missing user parameter with alias '{index_or_alias}' for "
- f"command '{cmd_name}'."
- )
-
- return param.name # flag: just return the parameter name
-
- def resolve_commands_and_params(
- self, backend_type: str, cmd_name: str, return_params: bool, index_or_alias: str
- ) -> str:
- """Resolve command or command's param value."""
- if backend_type == "system":
- backend = cast(Backend, self.ctx.system)
- backend_params = self.ctx.system_params
- else: # Application backend
- backend = cast(Backend, self.ctx.app)
- backend_params = self.ctx.app_params
-
- if cmd_name not in backend.commands:
- raise ConfigurationException(f"Command {cmd_name} not found")
-
- if return_params:
- params = backend.resolved_parameters(cmd_name, backend_params)
- if index_or_alias.isnumeric():
- i = int(index_or_alias)
- if i not in range(len(params)):
- raise ConfigurationException(
- f"Invalid parameter index {i} for command {cmd_name}"
- )
-
- param_value = params[i][0]
- else:
- param_value = None
- for value, param in params:
- if param.alias == index_or_alias:
- param_value = value
- break
-
- if not param_value:
- raise ConfigurationException(
- "No value for parameter with index or "
- f"alias {index_or_alias} of command {cmd_name}."
- )
- return param_value
-
- if not index_or_alias.isnumeric():
- raise ConfigurationException(f"Bad command index {index_or_alias}")
-
- i = int(index_or_alias)
- commands = backend.build_command(cmd_name, backend_params, self.param_resolver)
- if i not in range(len(commands)):
- raise ConfigurationException(f"Invalid index {i} for command {cmd_name}")
-
- return commands[i]
-
- def resolve_variables(self, backend_type: str, var_name: str) -> str:
- """Resolve variable value."""
- if backend_type == "system":
- backend = cast(Backend, self.ctx.system)
- else: # Application backend
- backend = cast(Backend, self.ctx.app)
-
- if var_name not in backend.variables:
- raise ConfigurationException(f"Unknown variable {var_name}")
-
- return backend.variables[var_name]
-
- def param_matcher(
- self,
- param_name: str,
- cmd_name: str | None,
- resolved_params: list[tuple[str | None, Param]] | None,
- ) -> str:
- """Regexp to resolve a param from the param_name."""
- # this pattern supports parameter names like "application.commands.run:0" and
- # "system.commands.run.params:0"
- # Note: 'software' is included for backward compatibility.
- commands_and_params_match = re.match(
- r"(?P<type>application|software|system)[.]commands[.]"
- r"(?P<name>\w+)"
- r"(?P<params>[.]params|)[:]"
- r"(?P<index_or_alias>\w+)",
- param_name,
- )
-
- if commands_and_params_match:
- backend_type, cmd_name, return_params, index_or_alias = (
- commands_and_params_match["type"],
- commands_and_params_match["name"],
- commands_and_params_match["params"],
- commands_and_params_match["index_or_alias"],
- )
- return self.resolve_commands_and_params(
- backend_type, cmd_name, bool(return_params), index_or_alias
- )
-
- # Note: 'software' is included for backward compatibility.
- variables_match = re.match(
- r"(?P<type>application|software|system)[.]variables:(?P<var_name>\w+)",
- param_name,
- )
- if variables_match:
- backend_type, var_name = (
- variables_match["type"],
- variables_match["var_name"],
- )
- return self.resolve_variables(backend_type, var_name)
-
- user_params_match = re.match(r"user_params:(?P<index_or_alias>\w+)", param_name)
- if user_params_match:
- index_or_alias = user_params_match["index_or_alias"]
- return self.resolve_user_params(cmd_name, index_or_alias, resolved_params)
-
- raise ConfigurationException(f"Unable to resolve parameter {param_name}")
-
- def param_resolver(
- self,
- param_name: str,
- cmd_name: str | None = None,
- resolved_params: list[tuple[str | None, Param]] | None = None,
- ) -> str:
- """Resolve parameter value based on current execution context."""
- # Note: 'software.*' is included for backward compatibility.
- resolved_param = None
- if param_name in ["application.name", "software.name"]:
- resolved_param = self.ctx.app.name
- elif param_name in ["application.description", "software.description"]:
- resolved_param = self.ctx.app.description
- elif self.ctx.app.config_location and (
- param_name in ["application.config_dir", "software.config_dir"]
- ):
- resolved_param = str(self.ctx.app.config_location.absolute())
- elif self.ctx.system is not None:
- if param_name == "system.name":
- resolved_param = self.ctx.system.name
- elif param_name == "system.description":
- resolved_param = self.ctx.system.description
- elif param_name == "system.config_dir" and self.ctx.system.config_location:
- resolved_param = str(self.ctx.system.config_location.absolute())
-
- if not resolved_param:
- resolved_param = self.param_matcher(param_name, cmd_name, resolved_params)
- return resolved_param
-
- def __call__(
- self,
- param_name: str,
- cmd_name: str | None = None,
- resolved_params: list[tuple[str | None, Param]] | None = None,
- ) -> str:
- """Resolve provided parameter."""
- return self.param_resolver(param_name, cmd_name, resolved_params)
-
-
-def validate_parameters(
- backend: Backend, command_names: list[str], params: list[str]
-) -> None:
- """Check parameters passed to backend."""
- for param in params:
- acceptable = any(
- backend.validate_parameter(command_name, param)
- for command_name in command_names
- if command_name in backend.commands
- )
-
- if not acceptable:
- backend_type = "System" if isinstance(backend, System) else "Application"
- raise ValueError(
- f"{backend_type} parameter '{param}' not valid for "
- f"command '{' or '.join(command_names)}'."
- )
-
-
-def get_application_by_name_and_system(
- application_name: str, system_name: str
-) -> Application:
- """Get application."""
- applications = get_application(application_name, system_name)
- if not applications:
- raise ValueError(
- f"Application '{application_name}' doesn't support the "
- f"system '{system_name}'."
- )
-
- if len(applications) != 1:
- raise ValueError(
- f"Error during getting application {application_name} for the "
- f"system {system_name}."
- )
-
- return applications[0]
-
-
-def get_application_and_system(
- application_name: str, system_name: str
-) -> tuple[Application, System]:
- """Return application and system by provided names."""
- system = get_system(system_name)
- if not system:
- raise ValueError(f"System {system_name} is not found.")
-
- application = get_application_by_name_and_system(application_name, system_name)
-
- return application, system
-
-
-def run_application(
- application_name: str,
- application_params: list[str],
- system_name: str,
- system_params: list[str],
-) -> ExecutionContext:
- """Run application on the provided system."""
- application, system = get_application_and_system(application_name, system_name)
- validate_parameters(application, ["run"], application_params)
- validate_parameters(system, ["run"], system_params)
-
- ctx = ExecutionContext(
- app=application,
- app_params=application_params,
- system=system,
- system_params=system_params,
- )
-
- logger.debug("Generating commands to execute")
- commands_to_run = ctx.system.build_command(
- "run", ctx.system_params, ctx.param_resolver
- )
-
- for command in commands_to_run:
- logger.debug("Running: %s", command)
- exit_code, ctx.stdout, ctx.stderr = ctx.system.run(command)
-
- if exit_code != 0:
- logger.warning("Application exited with exit code %i", exit_code)
-
- return ctx
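
The removed resolver accepted parameter references of the form `application.commands.<cmd>:<index>`, `system.commands.<cmd>.params:<alias>`, `system.variables:<name>` and `user_params:<alias>`. A minimal standalone sketch of the matching step, with the regular expression copied from the deleted code above (the sample references are examples only):

```python
import re

# Pattern copied verbatim from the removed param_matcher; 'software' was kept
# only for backward compatibility.
COMMANDS_AND_PARAMS = re.compile(
    r"(?P<type>application|software|system)[.]commands[.]"
    r"(?P<name>\w+)"
    r"(?P<params>[.]params|)[:]"
    r"(?P<index_or_alias>\w+)"
)

for ref in ("system.commands.run:0", "application.commands.run.params:input_file"):
    match = COMMANDS_AND_PARAMS.match(ref)
    assert match is not None
    print(match["type"], match["name"], bool(match["params"]), match["index_or_alias"])
# system run False 0
# application run True input_file
```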
diff --git a/src/mlia/backend/executor/fs.py b/src/mlia/backend/executor/fs.py
deleted file mode 100644
index 3fce19c..0000000
--- a/src/mlia/backend/executor/fs.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Module to host all file system related functions."""
-from __future__ import annotations
-
-import re
-import shutil
-from pathlib import Path
-from typing import Literal
-
-from mlia.utils.filesystem import get_mlia_resources
-
-ResourceType = Literal["applications", "systems"]
-
-
-def get_backend_resources() -> Path:
- """Get backend resources folder path."""
- return get_mlia_resources() / "backends"
-
-
-def get_backends_path(name: ResourceType) -> Path:
- """Return the absolute path of the specified resource.
-
- It uses importlib to return resources packaged with MANIFEST.in.
- """
- if not name:
- raise ResourceWarning("Resource name is not provided")
-
- resource_path = get_backend_resources() / name
- if resource_path.is_dir():
- return resource_path
-
- raise ResourceWarning(f"Resource '{name}' not found.")
-
-
-def copy_directory_content(source: Path, destination: Path) -> None:
- """Copy content of the source directory into destination directory."""
- for item in source.iterdir():
- src = source / item.name
- dest = destination / item.name
-
- if src.is_dir():
- shutil.copytree(src, dest)
- else:
- shutil.copy2(src, dest)
-
-
-def remove_resource(resource_directory: str, resource_type: ResourceType) -> None:
- """Remove resource data."""
- resources = get_backends_path(resource_type)
-
- resource_location = resources / resource_directory
- if not resource_location.exists():
- raise Exception(f"Resource {resource_directory} does not exist")
-
- if not resource_location.is_dir():
- raise Exception(f"Wrong resource {resource_directory}")
-
- shutil.rmtree(resource_location)
-
-
-def remove_directory(directory_path: Path | None) -> None:
- """Remove directory."""
- if not directory_path or not directory_path.is_dir():
- raise Exception("No directory path provided")
-
- shutil.rmtree(directory_path)
-
-
-def recreate_directory(directory_path: Path | None) -> None:
- """Recreate directory."""
- if not directory_path:
- raise Exception("No directory path provided")
-
- if directory_path.exists() and not directory_path.is_dir():
- raise Exception(
-            f"Path {str(directory_path)} exists but is not a directory."
- )
-
- if directory_path.is_dir():
- remove_directory(directory_path)
-
- directory_path.mkdir()
-
-
-def valid_for_filename(value: str, replacement: str = "") -> str:
-    """Replace non-alphanumeric characters."""
- return re.sub(r"[^\w.]", replacement, value, flags=re.ASCII)
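
For reference, the file-name sanitisation removed above simply replaces every character outside `[A-Za-z0-9_.]`; a small illustration (the input string is an example taken from the system names in this patch):

```python
import re

def valid_for_filename(value: str, replacement: str = "") -> str:
    """Replace non-alphanumeric characters (same logic as the removed helper)."""
    return re.sub(r"[^\w.]", replacement, value, flags=re.ASCII)

print(valid_for_filename("Corstone-300: Cortex-M55+Ethos-U55", "_"))
# Corstone_300__Cortex_M55_Ethos_U55
```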
diff --git a/src/mlia/backend/executor/output_consumer.py b/src/mlia/backend/executor/output_consumer.py
deleted file mode 100644
index 3c3b132..0000000
--- a/src/mlia/backend/executor/output_consumer.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Output consumers module."""
-from __future__ import annotations
-
-import base64
-import json
-import re
-from typing import Protocol
-from typing import runtime_checkable
-
-
-@runtime_checkable
-class OutputConsumer(Protocol):
- """Protocol to consume output."""
-
- def feed(self, line: str) -> bool:
- """
- Feed a new line to be parsed.
-
- Return True if the line should be removed from the output.
- """
-
-
-class Base64OutputConsumer(OutputConsumer):
- """
- Parser to extract base64-encoded JSON from tagged standard output.
-
- Example of the tagged output:
- ```
- # Encoded JSON: {"test": 1234}
- <metrics>eyJ0ZXN0IjogMTIzNH0</metrics>
- ```
- """
-
- TAG_NAME = "metrics"
-
- def __init__(self) -> None:
- """Set up the regular expression to extract tagged strings."""
- self._regex = re.compile(rf"<{self.TAG_NAME}>(.*)</{self.TAG_NAME}>")
- self.parsed_output: list = []
-
- def feed(self, line: str) -> bool:
- """
- Parse the output line and save the decoded output.
-
- Returns True if the line contains tagged output.
-
- Example:
- Using the tagged output from the class docs the parser should collect
- the following:
- ```
- [
- {"test": 1234}
- ]
- ```
- """
- res_b64 = self._regex.search(line)
- if res_b64:
- res_json = base64.b64decode(res_b64.group(1), validate=True)
- res = json.loads(res_json)
- self.parsed_output.append(res)
- # Remove this line from the output, i.e. consume it, as it
- # does not contain any human readable content.
- return True
-
- return False
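
A self-contained round-trip of the tagged-output format the removed consumer parsed; the payload value is an example:

```python
import base64
import json
import re

TAG = "metrics"

# Produce a line in the same format an application would emit ...
payload = base64.b64encode(json.dumps({"test": 1234}).encode()).decode()
line = f"<{TAG}>{payload}</{TAG}>"

# ... and extract and decode it the way Base64OutputConsumer.feed() did.
match = re.search(rf"<{TAG}>(.*)</{TAG}>", line)
assert match is not None
print(json.loads(base64.b64decode(match.group(1), validate=True)))  # {'test': 1234}
```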
diff --git a/src/mlia/backend/executor/proc.py b/src/mlia/backend/executor/proc.py
deleted file mode 100644
index 39a0689..0000000
--- a/src/mlia/backend/executor/proc.py
+++ /dev/null
@@ -1,191 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Processes module.
-
-This module contains all classes and functions for dealing with Linux
-processes.
-"""
-from __future__ import annotations
-
-import datetime
-import logging
-import shlex
-import signal
-import tempfile
-import time
-from pathlib import Path
-from typing import Any
-
-from sh import Command
-from sh import CommandNotFound
-from sh import ErrorReturnCode
-from sh import RunningCommand
-
-from mlia.backend.executor.fs import valid_for_filename
-
-logger = logging.getLogger(__name__)
-
-
-class CommandFailedException(Exception):
- """Exception for failed command execution."""
-
-
-class ShellCommand:
- """Wrapper class for shell commands."""
-
- def run(
- self,
- cmd: str,
- *args: str,
- _cwd: Path | None = None,
- _tee: bool = True,
- _bg: bool = True,
- _out: Any = None,
- _err: Any = None,
- _search_paths: list[Path] | None = None,
- ) -> RunningCommand:
- """Run the shell command with the given arguments.
-
- There are special arguments that modify the behaviour of the process.
- _cwd: current working directory
- _tee: it redirects the stdout both to console and file
- _bg: if True, it runs the process in background and the command is not
- blocking.
- _out: use this object for stdout redirect,
- _err: use this object for stderr redirect,
-        _search_paths: if present, used to search for the executable
- """
- try:
- kwargs = {}
- if _cwd:
- kwargs["_cwd"] = str(_cwd)
- command = Command(cmd, _search_paths).bake(args, **kwargs)
- except CommandNotFound as error:
- logging.error("Command '%s' not found", error.args[0])
- raise error
-
- out, err = _out, _err
- if not _out and not _err:
- out, err = (str(item) for item in self.get_stdout_stderr_paths(cmd))
-
- return command(_out=out, _err=err, _tee=_tee, _bg=_bg, _bg_exc=False)
-
- @classmethod
- def get_stdout_stderr_paths(cls, cmd: str) -> tuple[Path, Path]:
-        """Construct and return the paths of the stdout/stderr files."""
- timestamp = datetime.datetime.now().timestamp()
- base_path = Path(tempfile.mkdtemp(prefix="mlia-", suffix=f"{timestamp}"))
- base = base_path / f"{valid_for_filename(cmd, '_')}_{timestamp}"
- stdout = base.with_suffix(".out")
- stderr = base.with_suffix(".err")
- try:
- stdout.touch()
- stderr.touch()
- except FileNotFoundError as error:
- logging.error("File not found: %s", error.filename)
- raise error
- return stdout, stderr
-
-
-def parse_command(command: str, shell: str = "bash") -> list[str]:
- """Parse command."""
- cmd, *args = shlex.split(command, posix=True)
-
- if is_shell_script(cmd):
- args = [cmd] + args
- cmd = shell
-
- return [cmd] + args
-
-
-def execute_command( # pylint: disable=invalid-name
- command: str,
- cwd: Path,
- bg: bool = False,
- shell: str = "bash",
- out: Any = None,
- err: Any = None,
-) -> RunningCommand:
- """Execute shell command."""
- cmd, *args = parse_command(command, shell)
-
- search_paths = None
- if cmd != shell and (cwd / cmd).is_file():
- search_paths = [cwd]
-
- return ShellCommand().run(
- cmd, *args, _cwd=cwd, _bg=bg, _search_paths=search_paths, _out=out, _err=err
- )
-
-
-def is_shell_script(cmd: str) -> bool:
- """Check if command is shell script."""
- return cmd.endswith(".sh")
-
-
-def run_and_wait(
- command: str,
- cwd: Path,
- terminate_on_error: bool = False,
- out: Any = None,
- err: Any = None,
-) -> tuple[int, bytearray, bytearray]:
- """
- Run command and wait while it is executing.
-
- Returns a tuple: (exit_code, stdout, stderr)
- """
- running_cmd: RunningCommand | None = None
- try:
- running_cmd = execute_command(command, cwd, bg=True, out=out, err=err)
- return running_cmd.exit_code, running_cmd.stdout, running_cmd.stderr
- except ErrorReturnCode as cmd_failed:
- raise CommandFailedException() from cmd_failed
- except Exception as error:
- is_running = running_cmd is not None and running_cmd.is_alive()
- if terminate_on_error and is_running:
- logger.debug("Terminating ...")
- terminate_command(running_cmd)
-
- raise error
-
-
-def terminate_command(
- running_cmd: RunningCommand,
- wait: bool = True,
- wait_period: float = 0.5,
- number_of_attempts: int = 20,
-) -> None:
- """Terminate running command."""
- try:
- running_cmd.process.signal_group(signal.SIGINT)
- if wait:
- for _ in range(number_of_attempts):
- time.sleep(wait_period)
- if not running_cmd.is_alive():
- return
- logger.error(
- "Unable to terminate process %i. Sending SIGTERM...",
- running_cmd.process.pid,
- )
- running_cmd.process.signal_group(signal.SIGTERM)
- except ProcessLookupError:
- pass
-
-
-def print_command_stdout(command: RunningCommand) -> None:
- """Print the stdout of a command.
-
-    The command has two states: running and done.
-    If the command is still running, the output is read from the running
-    process. If the command has finished its execution, the stdout is taken
-    from the stdout property.
- """
- if command.is_alive():
- while True:
- try:
- print(command.next(), end="")
- except StopIteration:
- break
- else:
- print(command.stdout)
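
The command parsing removed above routed shell scripts through the configured shell and ran everything else directly; a minimal sketch of that behaviour (example command lines only):

```python
import shlex

def parse_command(command: str, shell: str = "bash") -> list[str]:
    """Split a command line; prepend the shell for *.sh scripts."""
    cmd, *args = shlex.split(command, posix=True)
    if cmd.endswith(".sh"):
        return [shell, cmd, *args]
    return [cmd, *args]

print(parse_command("./run.sh --data model.tflite"))
# ['bash', './run.sh', '--data', 'model.tflite']
print(parse_command("FVP_Corstone_SSE-300_Ethos-U55 --stat"))
# ['FVP_Corstone_SSE-300_Ethos-U55', '--stat']
```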
diff --git a/src/mlia/backend/executor/runner.py b/src/mlia/backend/executor/runner.py
deleted file mode 100644
index 2330fd9..0000000
--- a/src/mlia/backend/executor/runner.py
+++ /dev/null
@@ -1,98 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Module for backend runner."""
-from __future__ import annotations
-
-from dataclasses import dataclass
-from pathlib import Path
-
-from mlia.backend.executor.application import get_available_applications
-from mlia.backend.executor.application import install_application
-from mlia.backend.executor.execution import ExecutionContext
-from mlia.backend.executor.execution import run_application
-from mlia.backend.executor.system import get_available_systems
-from mlia.backend.executor.system import install_system
-
-
-@dataclass
-class ExecutionParams:
- """Application execution params."""
-
- application: str
- system: str
- application_params: list[str]
- system_params: list[str]
-
-
-class BackendRunner:
- """Backend runner."""
-
- def __init__(self) -> None:
- """Init BackendRunner instance."""
-
- @staticmethod
- def get_installed_systems() -> list[str]:
- """Get list of the installed systems."""
- return [system.name for system in get_available_systems()]
-
- @staticmethod
- def get_installed_applications(system: str | None = None) -> list[str]:
-        """Get list of the installed applications."""
- return [
- app.name
- for app in get_available_applications()
- if system is None or app.can_run_on(system)
- ]
-
- def is_application_installed(self, application: str, system: str) -> bool:
-        """Return true if the requested application is installed."""
- return application in self.get_installed_applications(system)
-
- def is_system_installed(self, system: str) -> bool:
-        """Return true if the requested system is installed."""
- return system in self.get_installed_systems()
-
- def systems_installed(self, systems: list[str]) -> bool:
- """Check if all provided systems are installed."""
- if not systems:
- return False
-
- installed_systems = self.get_installed_systems()
- return all(system in installed_systems for system in systems)
-
- def applications_installed(self, applications: list[str]) -> bool:
- """Check if all provided applications are installed."""
- if not applications:
- return False
-
- installed_apps = self.get_installed_applications()
- return all(app in installed_apps for app in applications)
-
- def all_installed(self, systems: list[str], apps: list[str]) -> bool:
- """Check if all provided artifacts are installed."""
- return self.systems_installed(systems) and self.applications_installed(apps)
-
- @staticmethod
- def install_system(system_path: Path) -> None:
- """Install system."""
- install_system(system_path)
-
- @staticmethod
- def install_application(app_path: Path) -> None:
- """Install application."""
- install_application(app_path)
-
- @staticmethod
- def run_application(execution_params: ExecutionParams) -> ExecutionContext:
- """Run requested application."""
- ctx = run_application(
- execution_params.application,
- execution_params.application_params,
- execution_params.system,
- execution_params.system_params,
- )
- return ctx
-
- @staticmethod
- def _params(name: str, params: list[str]) -> list[str]:
- return [p for item in [(name, param) for param in params] for p in item]
diff --git a/src/mlia/backend/executor/source.py b/src/mlia/backend/executor/source.py
deleted file mode 100644
index 6abc49f..0000000
--- a/src/mlia/backend/executor/source.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Contain source related classes and functions."""
-from __future__ import annotations
-
-import os
-import shutil
-import tarfile
-from abc import ABC
-from abc import abstractmethod
-from pathlib import Path
-from tarfile import TarFile
-
-from mlia.backend.executor.common import BACKEND_CONFIG_FILE
-from mlia.backend.executor.common import ConfigurationException
-from mlia.backend.executor.common import get_backend_config
-from mlia.backend.executor.common import is_backend_directory
-from mlia.backend.executor.common import load_config
-from mlia.backend.executor.config import BackendConfig
-from mlia.backend.executor.fs import copy_directory_content
-
-
-class Source(ABC):
- """Source class."""
-
- @abstractmethod
- def name(self) -> str | None:
- """Get source name."""
-
- @abstractmethod
- def config(self) -> BackendConfig | None:
- """Get configuration file content."""
-
- @abstractmethod
- def install_into(self, destination: Path) -> None:
- """Install source into destination directory."""
-
- @abstractmethod
- def create_destination(self) -> bool:
- """Return True if destination folder should be created before installation."""
-
-
-class DirectorySource(Source):
- """DirectorySource class."""
-
- def __init__(self, directory_path: Path) -> None:
- """Create the DirectorySource instance."""
- assert isinstance(directory_path, Path)
- self.directory_path = directory_path
-
- def name(self) -> str:
- """Return name of source."""
- return self.directory_path.name
-
- def config(self) -> BackendConfig | None:
- """Return configuration file content."""
- if not is_backend_directory(self.directory_path):
- raise ConfigurationException("No configuration file found")
-
- config_file = get_backend_config(self.directory_path)
- return load_config(config_file)
-
- def install_into(self, destination: Path) -> None:
- """Install source into destination directory."""
- if not destination.is_dir():
- raise ConfigurationException(f"Wrong destination {destination}.")
-
- if not self.directory_path.is_dir():
- raise ConfigurationException(
- f"Directory {self.directory_path} does not exist."
- )
-
- copy_directory_content(self.directory_path, destination)
-
- def create_destination(self) -> bool:
- """Return True if destination folder should be created before installation."""
- return True
-
-
-class TarArchiveSource(Source):
- """TarArchiveSource class."""
-
- def __init__(self, archive_path: Path) -> None:
- """Create the TarArchiveSource class."""
- assert isinstance(archive_path, Path)
- self.archive_path = archive_path
- self._config: BackendConfig | None = None
- self._has_top_level_folder: bool | None = None
- self._name: str | None = None
-
- def _read_archive_content(self) -> None:
- """Read various information about archive."""
- # get source name from archive name (everything without extensions)
- extensions = "".join(self.archive_path.suffixes)
- self._name = self.archive_path.name.rstrip(extensions)
-
- if not self.archive_path.exists():
- return
-
- with self._open(self.archive_path) as archive:
- try:
- config_entry = archive.getmember(BACKEND_CONFIG_FILE)
- self._has_top_level_folder = False
- except KeyError as error_no_config:
- try:
- archive_entries = archive.getnames()
- entries_common_prefix = os.path.commonprefix(archive_entries)
- top_level_dir = entries_common_prefix.rstrip("/")
-
- if not top_level_dir:
- raise RuntimeError(
- "Archive has no top level directory"
- ) from error_no_config
-
- config_path = f"{top_level_dir}/{BACKEND_CONFIG_FILE}"
-
- config_entry = archive.getmember(config_path)
- self._has_top_level_folder = True
- self._name = top_level_dir
- except (KeyError, RuntimeError) as error_no_root_dir_or_config:
- raise ConfigurationException(
- "No configuration file found"
- ) from error_no_root_dir_or_config
-
- content = archive.extractfile(config_entry)
- self._config = load_config(content)
-
- def config(self) -> BackendConfig | None:
- """Return configuration file content."""
- if self._config is None:
- self._read_archive_content()
-
- return self._config
-
- def name(self) -> str | None:
- """Return name of the source."""
- if self._name is None:
- self._read_archive_content()
-
- return self._name
-
- def create_destination(self) -> bool:
- """Return True if destination folder must be created before installation."""
- if self._has_top_level_folder is None:
- self._read_archive_content()
-
- return not self._has_top_level_folder
-
- def install_into(self, destination: Path) -> None:
- """Install source into destination directory."""
- if not destination.is_dir():
- raise ConfigurationException(f"Wrong destination {destination}.")
-
- with self._open(self.archive_path) as archive:
- archive.extractall(destination)
-
- def _open(self, archive_path: Path) -> TarFile:
- """Open archive file."""
- if not archive_path.is_file():
- raise ConfigurationException(f"File {archive_path} does not exist.")
-
- if archive_path.name.endswith("tar.gz") or archive_path.name.endswith("tgz"):
- mode = "r:gz"
- else:
- raise ConfigurationException(f"Unsupported archive type {archive_path}.")
-
- # The returned TarFile object can be used as a context manager (using
- # 'with') by the calling instance.
- return tarfile.open( # pylint: disable=consider-using-with
- self.archive_path, mode=mode
- )
-
-
-def get_source(source_path: Path) -> TarArchiveSource | DirectorySource:
- """Return appropriate source instance based on provided source path."""
- if source_path.is_file():
- return TarArchiveSource(source_path)
-
- if source_path.is_dir():
- return DirectorySource(source_path)
-
- raise ConfigurationException(f"Unable to read {source_path}.")
-
-
-def create_destination_and_install(source: Source, resource_path: Path) -> None:
- """Create destination directory and install source.
-
-    This function is used for the actual installation of a system/backend.
-    A new directory is created inside :resource_path: if needed; if the
-    archive already contains a top level folder, no new directory is created.
- """
- destination = resource_path
- create_destination = source.create_destination()
-
- if create_destination:
- name = source.name()
- if not name:
- raise ConfigurationException("Unable to get source name.")
-
- destination = resource_path / name
- destination.mkdir()
- try:
- source.install_into(destination)
- except Exception as error:
- if create_destination:
- shutil.rmtree(destination)
- raise error
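
The top-level-folder detection used by the removed TarArchiveSource boils down to a common-prefix check over the archive entries; a small sketch with example entry lists:

```python
from __future__ import annotations

import os

def top_level_dir(archive_entries: list[str]) -> str | None:
    """Return the shared top level directory of the entries, if any."""
    prefix = os.path.commonprefix(archive_entries).rstrip("/")
    return prefix or None

print(top_level_dir(["corstone-300/backend-config.json", "corstone-300/app.axf"]))
# corstone-300
print(top_level_dir(["backend-config.json", "app.axf"]))
# None
```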
diff --git a/src/mlia/backend/executor/system.py b/src/mlia/backend/executor/system.py
deleted file mode 100644
index a5ecf19..0000000
--- a/src/mlia/backend/executor/system.py
+++ /dev/null
@@ -1,178 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""System backend module."""
-from __future__ import annotations
-
-from pathlib import Path
-from typing import Any
-from typing import cast
-from typing import List
-
-from mlia.backend.executor.common import Backend
-from mlia.backend.executor.common import ConfigurationException
-from mlia.backend.executor.common import get_backend_configs
-from mlia.backend.executor.common import get_backend_directories
-from mlia.backend.executor.common import load_config
-from mlia.backend.executor.common import remove_backend
-from mlia.backend.executor.config import SystemConfig
-from mlia.backend.executor.fs import get_backends_path
-from mlia.backend.executor.proc import run_and_wait
-from mlia.backend.executor.source import create_destination_and_install
-from mlia.backend.executor.source import get_source
-
-
-class System(Backend):
- """System class."""
-
- def __init__(self, config: SystemConfig) -> None:
- """Construct the System class using the dictionary passed."""
- super().__init__(config)
-
- self._setup_reporting(config)
-
- def _setup_reporting(self, config: SystemConfig) -> None:
- self.reporting = config.get("reporting")
-
- def run(self, command: str) -> tuple[int, bytearray, bytearray]:
- """
- Run command on the system.
-
- Returns a tuple: (exit_code, stdout, stderr)
- """
- cwd = self.config_location
- if not isinstance(cwd, Path) or not cwd.is_dir():
- raise ConfigurationException(
- f"System has invalid config location: {cwd}",
- )
-
- stdout = bytearray()
- stderr = bytearray()
-
- return run_and_wait(
- command,
- cwd=cwd,
- terminate_on_error=True,
- out=stdout,
- err=stderr,
- )
-
- def __eq__(self, other: object) -> bool:
- """Overload operator ==."""
- if not isinstance(other, System):
- return False
-
- return super().__eq__(other) and self.name == other.name
-
- def get_details(self) -> dict[str, Any]:
- """Return a dictionary with all relevant information of a System."""
- output = {
- "type": "system",
- "name": self.name,
- "description": self.description,
- "commands": self._get_command_details(),
- "annotations": self.annotations,
- }
-
- return output
-
-
-def get_available_systems_directory_names() -> list[str]:
-    """Return a list of directory names for all available systems."""
- return [entry.name for entry in get_backend_directories("systems")]
-
-
-def get_available_systems() -> list[System]:
- """Return a list with all available systems."""
- available_systems = []
- for config_json in get_backend_configs("systems"):
- config_entries = cast(List[SystemConfig], (load_config(config_json)))
- for config_entry in config_entries:
- config_entry["config_location"] = config_json.parent.absolute()
- system = load_system(config_entry)
- available_systems.append(system)
-
- return sorted(available_systems, key=lambda system: system.name)
-
-
-def get_system(system_name: str) -> System:
- """Return a system instance with the same name passed as argument."""
- available_systems = get_available_systems()
- for system in available_systems:
- if system_name == system.name:
- return system
- raise ConfigurationException(f"System '{system_name}' not found.")
-
-
-def install_system(source_path: Path) -> None:
- """Install new system."""
- try:
- source = get_source(source_path)
- config = cast(List[SystemConfig], source.config())
- systems_to_install = [load_system(entry) for entry in config]
- except Exception as error:
- raise ConfigurationException("Unable to read system definition") from error
-
- if not systems_to_install:
- raise ConfigurationException("No system definition found")
-
- available_systems = get_available_systems()
- already_installed = [s for s in systems_to_install if s in available_systems]
- if already_installed:
- names = [system.name for system in already_installed]
- raise ConfigurationException(
- f"Systems [{','.join(names)}] are already installed."
- )
-
- create_destination_and_install(source, get_backends_path("systems"))
-
-
-def remove_system(directory_name: str) -> None:
- """Remove system."""
- remove_backend(directory_name, "systems")
-
-
-def load_system(config: SystemConfig) -> System:
-    """Load system based on its execution type."""
- populate_shared_params(config)
-
- return System(config)
-
-
-def populate_shared_params(config: SystemConfig) -> None:
- """Populate command parameters with shared parameters."""
- user_params = config.get("user_params")
- if not user_params or "shared" not in user_params:
- return
-
- shared_user_params = user_params["shared"]
- if not shared_user_params:
- return
-
- only_aliases = all(p.get("alias") for p in shared_user_params)
- if not only_aliases:
- raise ConfigurationException("All shared parameters should have aliases")
-
- commands = config.get("commands", {})
- for cmd_name in ["run"]:
- command = commands.get(cmd_name)
- if command is None:
- commands[cmd_name] = []
- cmd_user_params = user_params.get(cmd_name)
- if not cmd_user_params:
- cmd_user_params = shared_user_params
- else:
- only_aliases = all(p.get("alias") for p in cmd_user_params)
- if not only_aliases:
- raise ConfigurationException(
- f"All parameters for command {cmd_name} should have aliases."
- )
- merged_by_alias = {
- **{p.get("alias"): p for p in shared_user_params},
- **{p.get("alias"): p for p in cmd_user_params},
- }
- cmd_user_params = list(merged_by_alias.values())
-
- user_params[cmd_name] = cmd_user_params
-
- config["commands"] = commands
- del user_params["shared"]
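
The shared-parameter handling removed here merged per-command parameters over the shared ones keyed by alias; a minimal illustration (the parameter values mirror the Corstone configs further down in this patch):

```python
shared_user_params = [
    {"name": "--data", "alias": "input_file"},
    {"name": "ethosu.num_macs=", "alias": "mac", "default_value": "256"},
]
run_user_params = [
    {"name": "ethosu.num_macs=", "alias": "mac", "default_value": "512"},
]

# Command-specific entries win over shared entries with the same alias.
merged_by_alias = {
    **{p["alias"]: p for p in shared_user_params},
    **{p["alias"]: p for p in run_user_params},
}
print(list(merged_by_alias.values()))
# [{'name': '--data', 'alias': 'input_file'},
#  {'name': 'ethosu.num_macs=', 'alias': 'mac', 'default_value': '512'}]
```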
diff --git a/src/mlia/backend/install.py b/src/mlia/backend/install.py
index 37a277b..c76e3e2 100644
--- a/src/mlia/backend/install.py
+++ b/src/mlia/backend/install.py
@@ -11,17 +11,12 @@ from abc import abstractmethod
from dataclasses import dataclass
from pathlib import Path
from typing import Callable
-from typing import Iterable
from typing import Optional
from typing import Union
-from mlia.backend.executor.runner import BackendRunner
-from mlia.backend.executor.system import remove_system
+from mlia.backend.repo import get_backend_repository
from mlia.utils.download import DownloadArtifact
from mlia.utils.filesystem import all_files_exist
-from mlia.utils.filesystem import all_paths_valid
-from mlia.utils.filesystem import copy_all
-from mlia.utils.filesystem import get_mlia_resources
from mlia.utils.filesystem import temp_directory
from mlia.utils.filesystem import working_directory
from mlia.utils.py_manager import get_package_manager
@@ -29,52 +24,6 @@ from mlia.utils.py_manager import get_package_manager
logger = logging.getLogger(__name__)
-# Mapping backend -> device_type -> system_name
-_SUPPORTED_SYSTEMS = {
- "Corstone-300": {
- "Ethos-U55": "Corstone-300: Cortex-M55+Ethos-U55",
- "Ethos-U65": "Corstone-300: Cortex-M55+Ethos-U65",
- "ethos-u55": "Corstone-300: Cortex-M55+Ethos-U55",
- "ethos-u65": "Corstone-300: Cortex-M55+Ethos-U65",
- },
- "Corstone-310": {
- "Ethos-U55": "Corstone-310: Cortex-M85+Ethos-U55",
- "Ethos-U65": "Corstone-310: Cortex-M85+Ethos-U65",
- "ethos-u55": "Corstone-310: Cortex-M85+Ethos-U55",
- "ethos-u65": "Corstone-310: Cortex-M85+Ethos-U65",
- },
-}
-
-# Mapping system_name -> application
-_SYSTEM_TO_APP_MAP = {
- "Corstone-300: Cortex-M55+Ethos-U55": "Generic Inference Runner: Ethos-U55",
- "Corstone-300: Cortex-M55+Ethos-U65": "Generic Inference Runner: Ethos-U65",
- "Corstone-310: Cortex-M85+Ethos-U55": "Generic Inference Runner: Ethos-U55",
- "Corstone-310: Cortex-M85+Ethos-U65": "Generic Inference Runner: Ethos-U65",
-}
-
-
-def get_system_name(backend: str, device_type: str) -> str:
- """Get the system name for the given backend and device type."""
- return _SUPPORTED_SYSTEMS[backend][device_type]
-
-
-def get_application_name(system_name: str) -> str:
- """Get application name for the provided system name."""
- return _SYSTEM_TO_APP_MAP[system_name]
-
-
-def get_all_system_names(backend: str) -> list[str]:
- """Get all systems supported by the backend."""
- return list(_SUPPORTED_SYSTEMS.get(backend, {}).values())
-
-
-def get_all_application_names(backend: str) -> list[str]:
- """Get all applications supported by the backend."""
- app_set = {_SYSTEM_TO_APP_MAP[sys] for sys in get_all_system_names(backend)}
- return list(app_set)
-
-
@dataclass
class InstallFromPath:
"""Installation from the local path."""
@@ -95,29 +44,24 @@ InstallationType = Union[InstallFromPath, DownloadAndInstall]
class Installation(ABC):
"""Base class for the installation process of the backends."""
- @property
- @abstractmethod
- def name(self) -> str:
- """Return name of the backend."""
-
- @property
- @abstractmethod
- def description(self) -> str:
- """Return description of the backend."""
+ def __init__(self, name: str, description: str) -> None:
+ """Init the installation."""
+ self.name = name
+ self.description = description
@property
@abstractmethod
def could_be_installed(self) -> bool:
- """Return true if backend could be installed in current environment."""
+ """Check if backend could be installed in current environment."""
@property
@abstractmethod
def already_installed(self) -> bool:
- """Return true if backend is already installed."""
+ """Check if backend is already installed."""
@abstractmethod
def supports(self, install_type: InstallationType) -> bool:
- """Return true if installation supports requested installation type."""
+ """Check if installation supports requested installation type."""
@abstractmethod
def install(self, install_type: InstallationType) -> None:
@@ -134,103 +78,53 @@ class BackendInfo:
backend_path: Path
copy_source: bool = True
- system_config: str | None = None
+ settings: dict | None = None
PathChecker = Callable[[Path], Optional[BackendInfo]]
BackendInstaller = Callable[[bool, Path], Path]
-class BackendMetadata:
- """Backend installation metadata."""
+class BackendInstallation(Installation):
+ """Backend installation."""
def __init__(
self,
name: str,
description: str,
- system_config: str,
- apps_resources: list[str],
fvp_dir_name: str,
download_artifact: DownloadArtifact | None,
- supported_platforms: list[str] | None = None,
+ supported_platforms: list[str] | None,
+ path_checker: PathChecker,
+ backend_installer: BackendInstaller | None,
) -> None:
- """
- Initialize BackendMetadata.
+ """Init the backend installation."""
+ super().__init__(name, description)
- Members expected_systems and expected_apps are filled automatically.
- """
- self.name = name
- self.description = description
- self.system_config = system_config
- self.apps_resources = apps_resources
self.fvp_dir_name = fvp_dir_name
self.download_artifact = download_artifact
self.supported_platforms = supported_platforms
-
- self.expected_systems = get_all_system_names(name)
- self.expected_apps = get_all_application_names(name)
-
- @property
- def expected_resources(self) -> Iterable[Path]:
- """Return list of expected resources."""
- resources = [self.system_config, *self.apps_resources]
-
- return (get_mlia_resources() / resource for resource in resources)
-
- @property
- def supported_platform(self) -> bool:
- """Return true if current platform supported."""
- if not self.supported_platforms:
- return True
-
- return platform.system() in self.supported_platforms
-
-
-class BackendInstallation(Installation):
- """Backend installation."""
-
- def __init__(
- self,
- backend_runner: BackendRunner,
- metadata: BackendMetadata,
- path_checker: PathChecker,
- backend_installer: BackendInstaller | None,
- ) -> None:
- """Init the backend installation."""
- self.backend_runner = backend_runner
- self.metadata = metadata
self.path_checker = path_checker
self.backend_installer = backend_installer
@property
- def name(self) -> str:
- """Return name of the backend."""
- return self.metadata.name
-
- @property
- def description(self) -> str:
- """Return description of the backend."""
- return self.metadata.description
-
- @property
def already_installed(self) -> bool:
"""Return true if backend already installed."""
- return self.backend_runner.all_installed(
- self.metadata.expected_systems, self.metadata.expected_apps
- )
+ backend_repo = get_backend_repository()
+ return backend_repo.is_backend_installed(self.name)
@property
def could_be_installed(self) -> bool:
"""Return true if backend could be installed."""
- if not self.metadata.supported_platform:
- return False
-
- return all_paths_valid(self.metadata.expected_resources)
+ return (
+ not self.supported_platforms
+ or platform.system() in self.supported_platforms
+ )
def supports(self, install_type: InstallationType) -> bool:
"""Return true if backends supported type of the installation."""
if isinstance(install_type, DownloadAndInstall):
- return self.metadata.download_artifact is not None
+ return self.download_artifact is not None
if isinstance(install_type, InstallFromPath):
return self.path_checker(install_type.backend_path) is not None
@@ -240,41 +134,38 @@ class BackendInstallation(Installation):
def install(self, install_type: InstallationType) -> None:
"""Install the backend."""
if isinstance(install_type, DownloadAndInstall):
- download_artifact = self.metadata.download_artifact
- assert download_artifact is not None, "No artifact provided"
+ assert self.download_artifact is not None, "No artifact provided"
- self.download_and_install(download_artifact, install_type.eula_agreement)
+ self._download_and_install(
+ self.download_artifact, install_type.eula_agreement
+ )
elif isinstance(install_type, InstallFromPath):
- backend_path = self.path_checker(install_type.backend_path)
- assert backend_path is not None, "Unable to resolve backend path"
+ backend_info = self.path_checker(install_type.backend_path)
- self.install_from(backend_path)
+ assert backend_info is not None, "Unable to resolve backend path"
+ self._install_from(backend_info)
else:
raise Exception(f"Unable to install {install_type}")
- def install_from(self, backend_info: BackendInfo) -> None:
+ def _install_from(self, backend_info: BackendInfo) -> None:
"""Install backend from the directory."""
- mlia_resources = get_mlia_resources()
-
- with temp_directory() as tmpdir:
- fvp_dist_dir = tmpdir / self.metadata.fvp_dir_name
-
- system_config = self.metadata.system_config
- if backend_info.system_config:
- system_config = backend_info.system_config
-
- resources_to_copy = [mlia_resources / system_config]
- if backend_info.copy_source:
- resources_to_copy.append(backend_info.backend_path)
-
- copy_all(*resources_to_copy, dest=fvp_dist_dir)
-
- self.backend_runner.install_system(fvp_dist_dir)
-
- for app in self.metadata.apps_resources:
- self.backend_runner.install_application(mlia_resources / app)
+ backend_repo = get_backend_repository()
+
+ if backend_info.copy_source:
+ backend_repo.copy_backend(
+ self.name,
+ backend_info.backend_path,
+ self.fvp_dir_name,
+ backend_info.settings,
+ )
+ else:
+ backend_repo.add_backend(
+ self.name,
+ backend_info.backend_path,
+ backend_info.settings,
+ )
- def download_and_install(
+ def _download_and_install(
self, download_artifact: DownloadArtifact, eula_agrement: bool
) -> None:
"""Download and install the backend."""
@@ -288,11 +179,10 @@ class BackendInstallation(Installation):
with tarfile.open(downloaded_to) as archive:
archive.extractall(dist_dir)
- assert self.backend_installer, (
- f"Backend '{self.metadata.name}' does not support "
- "download and installation."
- )
- backend_path = self.backend_installer(eula_agrement, dist_dir)
+ backend_path = dist_dir
+ if self.backend_installer:
+ backend_path = self.backend_installer(eula_agrement, dist_dir)
+
if self.path_checker(backend_path) is None:
raise Exception("Downloaded artifact has invalid structure")
@@ -300,18 +190,23 @@ class BackendInstallation(Installation):
def uninstall(self) -> None:
"""Uninstall the backend."""
- remove_system(self.metadata.fvp_dir_name)
+ backend_repo = get_backend_repository()
+ backend_repo.remove_backend(self.name)
class PackagePathChecker:
"""Package path checker."""
def __init__(
- self, expected_files: list[str], backend_subfolder: str | None = None
+ self,
+ expected_files: list[str],
+ backend_subfolder: str | None = None,
+        settings: dict | None = None,
) -> None:
"""Init the path checker."""
self.expected_files = expected_files
self.backend_subfolder = backend_subfolder
+ self.settings = settings
def __call__(self, backend_path: Path) -> BackendInfo | None:
"""Check if directory contains all expected files."""
@@ -319,15 +214,14 @@ class PackagePathChecker:
if not all_files_exist(resolved_paths):
return None
+ actual_backend_path = backend_path
if self.backend_subfolder:
subfolder = backend_path / self.backend_subfolder
- if not subfolder.is_dir():
- return None
+ if subfolder.is_dir():
+ actual_backend_path = subfolder
- return BackendInfo(subfolder)
-
- return BackendInfo(backend_path)
+ return BackendInfo(actual_backend_path, settings=self.settings)
class StaticPathChecker:
@@ -338,13 +232,13 @@ class StaticPathChecker:
static_backend_path: Path,
expected_files: list[str],
copy_source: bool = False,
- system_config: str | None = None,
+ settings: dict | None = None,
) -> None:
"""Init static path checker."""
self.static_backend_path = static_backend_path
self.expected_files = expected_files
self.copy_source = copy_source
- self.system_config = system_config
+ self.settings = settings
def __call__(self, backend_path: Path) -> BackendInfo | None:
"""Check if directory equals static backend path with all expected files."""
@@ -358,7 +252,7 @@ class StaticPathChecker:
return BackendInfo(
backend_path,
copy_source=self.copy_source,
- system_config=self.system_config,
+ settings=self.settings,
)
@@ -392,8 +286,8 @@ class PyPackageBackendInstallation(Installation):
expected_packages: list[str],
) -> None:
"""Init the backend installation."""
- self._name = name
- self._description = description
+ super().__init__(name, description)
+
self._packages_to_install = packages_to_install
self._packages_to_uninstall = packages_to_uninstall
self._expected_packages = expected_packages
@@ -401,16 +295,6 @@ class PyPackageBackendInstallation(Installation):
self.package_manager = get_package_manager()
@property
- def name(self) -> str:
- """Return name of the backend."""
- return self._name
-
- @property
- def description(self) -> str:
- """Return description of the backend."""
- return self._description
-
- @property
def could_be_installed(self) -> bool:
"""Check if backend could be installed."""
return True
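
A hedged sketch of the PathChecker contract the reworked installation relies on: given a candidate installation path, return a BackendInfo when the layout is recognised, otherwise None. The checked directory name and the settings key below are examples, not part of this patch:

```python
from __future__ import annotations

from pathlib import Path

from mlia.backend.install import BackendInfo, PathChecker

def example_checker(backend_path: Path) -> BackendInfo | None:
    """Accept the path only if it contains a 'models' sub-directory (example check)."""
    if (backend_path / "models").is_dir():
        return BackendInfo(backend_path, settings={"profile": "default"})
    return None

checker: PathChecker = example_checker
```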
diff --git a/src/mlia/backend/manager.py b/src/mlia/backend/manager.py
index c02dc6e..b0fa919 100644
--- a/src/mlia/backend/manager.py
+++ b/src/mlia/backend/manager.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Module for installation process."""
from __future__ import annotations
@@ -9,10 +9,12 @@ from abc import abstractmethod
from pathlib import Path
from typing import Callable
+from mlia.backend.corstone.install import get_corstone_installations
from mlia.backend.install import DownloadAndInstall
from mlia.backend.install import Installation
from mlia.backend.install import InstallationType
from mlia.backend.install import InstallFromPath
+from mlia.backend.tosa_checker.install import get_tosa_backend_installation
from mlia.core.errors import ConfigurationError
from mlia.core.errors import InternalError
from mlia.utils.misc import yes
@@ -269,3 +271,11 @@ class DefaultInstallationManager(InstallationManager, InstallationFiltersMixin):
installations = self.already_installed(backend_name)
return len(installations) == 1
+
+
+def get_installation_manager(noninteractive: bool = False) -> InstallationManager:
+ """Return installation manager."""
+ backends = get_corstone_installations()
+ backends.append(get_tosa_backend_installation())
+
+ return DefaultInstallationManager(backends, noninteractive=noninteractive)
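
Hypothetical usage of the relocated helper (the backend name is just an example):

```python
from mlia.backend.manager import get_installation_manager

manager = get_installation_manager(noninteractive=True)
print(manager.backend_installed("Corstone-300"))
```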
diff --git a/src/mlia/backend/repo.py b/src/mlia/backend/repo.py
new file mode 100644
index 0000000..3dd2e57
--- /dev/null
+++ b/src/mlia/backend/repo.py
@@ -0,0 +1,190 @@
+# SPDX-FileCopyrightText: Copyright 2023, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Module for backend repository.
+
+The backend repository is responsible for managing backends
+(apart from the Python package based ones) that have been
+installed via the command "mlia-backend".
+
+The repository has an associated directory (by default ~/.mlia)
+and a configuration file (by default ~/.mlia/mlia_config.json).
+In the configuration file the repository keeps track of the
+installed backends and their settings. Backend settings can be
+used by MLIA for correct instantiation of the backend.
+
+If a backend is removed, the repository removes the corresponding
+record from the configuration file along with the backend files if needed.
+"""
+from __future__ import annotations
+
+import json
+import shutil
+from pathlib import Path
+
+from mlia.utils.filesystem import copy_all
+
+
+class _ConfigFile:
+ """Configuration file for backend repository."""
+
+ def __init__(self, config_file: Path) -> None:
+ """Init configuration file."""
+ self.config_file = config_file
+ self.config: dict = {"backends": []}
+
+ if self.exists():
+ content = self.config_file.read_text()
+ self.config = json.loads(content)
+
+ def exists(self) -> bool:
+ """Check if configuration file exists."""
+ return self.config_file.is_file()
+
+ def save(self) -> None:
+ """Save configuration."""
+ content = json.dumps(self.config, indent=4)
+ self.config_file.write_text(content)
+
+ def add_backend(
+ self,
+ backend_name: str,
+ settings: dict,
+ ) -> None:
+ """Add backend settings to configuration file."""
+ item = {"name": backend_name, "settings": settings}
+ self.config["backends"].append(item)
+
+ self.save()
+
+ def remove_backend(self, backend_name: str) -> None:
+ """Remove backend settings."""
+ backend = self._get_backend(backend_name)
+
+ if backend:
+ self.config["backends"].remove(backend)
+
+ self.save()
+
+ def backend_exists(self, backend_name: str) -> bool:
+ """Check if backend exists in configuration file."""
+ return self._get_backend(backend_name) is not None
+
+ def _get_backend(self, backend_name: str) -> dict | None:
+ """Find backend settings by backend name."""
+ find_backend = (
+ item for item in self.config["backends"] if item["name"] == backend_name
+ )
+
+ return next(find_backend, None)
+
+ def get_backend_settings(self, backend_name: str) -> dict | None:
+ """Get backend settings."""
+ backend = self._get_backend(backend_name)
+
+ return backend["settings"] if backend else None
+
+
+class BackendRepository:
+ """Repository for keeping track of the installed backends."""
+
+ def __init__(
+ self,
+ repository: Path,
+ config_filename: str = "mlia_config.json",
+ ) -> None:
+ """Init repository instance."""
+ self.repository = repository
+ self.config_file = _ConfigFile(repository / config_filename)
+
+ self._init_repo()
+
+ def copy_backend(
+ self,
+ backend_name: str,
+ backend_path: Path,
+ backend_dir_name: str,
+ settings: dict | None = None,
+ ) -> None:
+ """Copy backend files into repository."""
+ repo_backend_path = self._get_backend_path(backend_dir_name)
+
+ if repo_backend_path.exists():
+ raise Exception(f"Unable to copy backend files for {backend_name}.")
+
+ copy_all(backend_path, dest=repo_backend_path)
+
+ settings = settings or {}
+ settings["backend_dir"] = backend_dir_name
+
+ self.config_file.add_backend(backend_name, settings)
+
+ def add_backend(
+ self,
+ backend_name: str,
+ backend_path: Path,
+ settings: dict | None = None,
+ ) -> None:
+ """Add backend to repository."""
+ if self.is_backend_installed(backend_name):
+ raise Exception(f"Backend {backend_name} already installed.")
+
+ settings = settings or {}
+ settings["backend_path"] = backend_path.absolute().as_posix()
+
+ self.config_file.add_backend(backend_name, settings)
+
+ def remove_backend(self, backend_name: str) -> None:
+ """Remove backend from repository."""
+ settings = self.config_file.get_backend_settings(backend_name)
+
+ if not settings:
+ raise Exception(f"Backend {backend_name} is not installed.")
+
+ if "backend_dir" in settings:
+ repo_backend_path = self._get_backend_path(settings["backend_dir"])
+ shutil.rmtree(repo_backend_path)
+
+ self.config_file.remove_backend(backend_name)
+
+ def is_backend_installed(self, backend_name: str) -> bool:
+ """Check if backend is installed."""
+ return self.config_file.backend_exists(backend_name)
+
+ def get_backend_settings(self, backend_name: str) -> tuple[Path, dict]:
+ """Return backend settings."""
+ settings = self.config_file.get_backend_settings(backend_name)
+
+ if not settings:
+ raise Exception(f"Backend {backend_name} is not installed.")
+
+ if backend_dir := settings.get("backend_dir", None):
+ return self._get_backend_path(backend_dir), settings
+
+ if backend_path := settings.get("backend_path", None):
+ return Path(backend_path), settings
+
+ raise Exception(f"Unable to resolve path of the backend {backend_name}.")
+
+ def _get_backend_path(self, backend_dir_name: str) -> Path:
+ """Return path to backend."""
+ return self.repository.joinpath("backends", backend_dir_name)
+
+ def _init_repo(self) -> None:
+ """Init repository."""
+ if self.repository.exists():
+ if not self.config_file.exists():
+ raise Exception(
+ f"Directory {self.repository} could not be used as MLIA repository."
+ )
+ else:
+ self.repository.mkdir()
+ self.repository.joinpath("backends").mkdir()
+
+ self.config_file.save()
+
+
+def get_backend_repository(
+ repository: Path = Path.home() / ".mlia",
+) -> BackendRepository:
+ """Return backend repository."""
+ return BackendRepository(repository)
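
A usage sketch of the new repository API; the backend name, the install path and the extra settings key are illustrative assumptions, not values mandated by this patch:

```python
from pathlib import Path

from mlia.backend.repo import get_backend_repository

repo = get_backend_repository()  # defaults to ~/.mlia
repo.add_backend(
    "Corstone-300",                     # example backend name
    Path("/opt/FVP_Corstone_SSE-300"),  # example pre-installed location
    settings={"profile": "default"},    # example extra setting
)
backend_path, settings = repo.get_backend_settings("Corstone-300")

# ~/.mlia/mlia_config.json afterwards (abbreviated):
# {"backends": [{"name": "Corstone-300",
#                "settings": {"profile": "default",
#                             "backend_path": "/opt/FVP_Corstone_SSE-300"}}]}
```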
diff --git a/src/mlia/backend/tosa_checker/__init__.py b/src/mlia/backend/tosa_checker/__init__.py
index c06a122..e11034f 100644
--- a/src/mlia/backend/tosa_checker/__init__.py
+++ b/src/mlia/backend/tosa_checker/__init__.py
@@ -8,7 +8,7 @@ from mlia.backend.registry import registry
from mlia.core.common import AdviceCategory
registry.register(
- "TOSA-Checker",
+ "tosa-checker",
BackendConfiguration(
supported_advice=[AdviceCategory.COMPATIBILITY],
supported_systems=[System.LINUX_AMD64],
diff --git a/src/mlia/cli/command_validators.py b/src/mlia/cli/command_validators.py
index 8eb966b..23101e0 100644
--- a/src/mlia/cli/command_validators.py
+++ b/src/mlia/cli/command_validators.py
@@ -23,18 +23,12 @@ def validate_backend(
compatible with each other.
    It assumes that prior checks were made on the validity of the target-profile.
"""
- target_map = {
- "ethos-u55": "Ethos-U55",
- "ethos-u65": "Ethos-U65",
- "cortex-a": "Cortex-A",
- "tosa": "TOSA",
- }
target = get_target(target_profile)
if not backend:
return get_default_backends_dict()[target]
- compatible_backends = supported_backends(target_map[target])
+ compatible_backends = supported_backends(target)
nor_backend = list(map(normalize_string, backend))
nor_compat_backend = list(map(normalize_string, compatible_backends))
diff --git a/src/mlia/cli/config.py b/src/mlia/cli/config.py
index 0dac3e8..433300c 100644
--- a/src/mlia/cli/config.py
+++ b/src/mlia/cli/config.py
@@ -4,16 +4,9 @@
from __future__ import annotations
import logging
-from functools import lru_cache
-from typing import List
-from typing import Optional
-from typing import TypedDict
-from mlia.backend.corstone.install import get_corstone_installations
-from mlia.backend.manager import DefaultInstallationManager
-from mlia.backend.manager import InstallationManager
-from mlia.backend.registry import get_supported_backends
-from mlia.backend.tosa_checker.install import get_tosa_backend_installation
+from mlia.backend.manager import get_installation_manager
+from mlia.target.registry import all_supported_backends
logger = logging.getLogger(__name__)
@@ -21,31 +14,24 @@ DEFAULT_PRUNING_TARGET = 0.5
DEFAULT_CLUSTERING_TARGET = 32
-def get_installation_manager(noninteractive: bool = False) -> InstallationManager:
- """Return installation manager."""
- backends = get_corstone_installations()
- backends.append(get_tosa_backend_installation())
-
- return DefaultInstallationManager(backends, noninteractive=noninteractive)
-
-
-@lru_cache
def get_available_backends() -> list[str]:
"""Return list of the available backends."""
+ available_backends = ["Vela", "ArmNNTFLiteDelegate"]
+
# Add backends using backend manager
manager = get_installation_manager()
- available_backends = [
+ available_backends.extend(
backend
- for backend in get_supported_backends()
+ for backend in all_supported_backends()
if manager.backend_installed(backend)
- ]
+ )
return available_backends
# List of mutually exclusive Corstone backends ordered by priority
_CORSTONE_EXCLUSIVE_PRIORITY = ("Corstone-310", "Corstone-300")
-_NON_ETHOS_U_BACKENDS = ("TOSA-Checker", "ArmNNTFLiteDelegate")
+_NON_ETHOS_U_BACKENDS = ("tosa-checker", "ArmNNTFLiteDelegate")
def get_ethos_u_default_backends(backends: list[str]) -> list[str]:
@@ -70,29 +56,14 @@ def get_default_backends() -> list[str]:
return backends
-def is_corstone_backend(backend: str) -> bool:
- """Check if the given backend is a Corstone backend."""
- return backend in _CORSTONE_EXCLUSIVE_PRIORITY
-
-
-BackendCompatibility = TypedDict(
- "BackendCompatibility",
- {
- "partial-match": bool,
- "backends": List[str],
- "default-return": Optional[List[str]],
- "use-custom-return": bool,
- "custom-return": Optional[List[str]],
- },
-)
-
-
def get_default_backends_dict() -> dict[str, list[str]]:
"""Return default backends for all targets."""
- ethos_u_defaults = get_ethos_u_default_backends(get_default_backends())
+ default_backends = get_default_backends()
+ ethos_u_defaults = get_ethos_u_default_backends(default_backends)
+
return {
"ethos-u55": ethos_u_defaults,
"ethos-u65": ethos_u_defaults,
"tosa": ["tosa-checker"],
- "cortex-a": ["armnn-tflitedelegate"],
+ "cortex-a": ["ArmNNTFLiteDelegate"],
}
diff --git a/src/mlia/cli/main.py b/src/mlia/cli/main.py
index 4a91b08..76f199e 100644
--- a/src/mlia/cli/main.py
+++ b/src/mlia/cli/main.py
@@ -74,7 +74,7 @@ def get_commands() -> list[CommandInfo]:
partial(add_target_options, profiles_to_skip=["tosa", "cortex-a"]),
partial(
add_backend_options,
- backends_to_skip=["tosa-checker", "armnn-tflitedelegate"],
+ backends_to_skip=["tosa-checker", "ArmNNTFLiteDelegate"],
),
add_multi_optimization_options,
add_output_options,
diff --git a/src/mlia/cli/options.py b/src/mlia/cli/options.py
index dac8c82..421533a 100644
--- a/src/mlia/cli/options.py
+++ b/src/mlia/cli/options.py
@@ -8,10 +8,10 @@ from pathlib import Path
from typing import Any
from typing import Callable
+from mlia.backend.corstone import is_corstone_backend
from mlia.cli.config import DEFAULT_CLUSTERING_TARGET
from mlia.cli.config import DEFAULT_PRUNING_TARGET
from mlia.cli.config import get_available_backends
-from mlia.cli.config import is_corstone_backend
from mlia.core.typing import OutputFormat
from mlia.target.config import get_builtin_supported_profile_names
@@ -47,12 +47,12 @@ def add_target_options(
"--target-profile",
required=required,
default=default_target_profile,
- help="Builtin target profile: {target_profiles}"
- "or path to custom target profile"
+    help="Built-in target profile or path to a custom target profile. "
+ f"Built-in target profiles are {', '.join(target_profiles)}. "
"Target profile that will set the target options "
"such as target, mac value, memory mode, etc. "
"For the values associated with each target profile "
- "please refer to the documentation.",
+ "please refer to the documentation. ",
)
diff --git a/src/mlia/resources/backend_configs/systems/SYSTEMS.txt b/src/mlia/resources/backend_configs/systems/SYSTEMS.txt
deleted file mode 100644
index 3861769..0000000
--- a/src/mlia/resources/backend_configs/systems/SYSTEMS.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-SPDX-License-Identifier: Apache-2.0
-
-This directory contains the configuration files of the system backends.
-
-Supported systems:
-
- * FVP Corstone-300 Ecosystem
- * FVP Corstone-310 Ecosystem
diff --git a/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json b/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json
deleted file mode 100644
index 7bc12c7..0000000
--- a/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json
+++ /dev/null
@@ -1,72 +0,0 @@
-[
- {
- "name": "Corstone-300: Cortex-M55+Ethos-U55",
- "description": "Cortex-M55 and Ethos-U55 functional model implementations based on Corstone-300 design for MPS3 board.",
- "annotations": {
- "ip_class": "Ethos-U55",
- "sim_type": "FM",
- "variant": "Cortex-M55+Ethos-U55"
- },
- "commands": {
- "run": [
- "/opt/VHT/VHT_Corstone_SSE-300_Ethos-U55 -a {software.variables:eval_app} {user_params:input_file}@0x90000000 -C {user_params:mac} -C mps3_board.telnetterminal0.start_telnet=0 -C mps3_board.uart0.out_file='-' -C mps3_board.uart0.shutdown_on_eot=1 -C mps3_board.visualisation.disable-visualisation=1 --stat"
- ]
- },
- "user_params": {
- "run": [
- {
- "name": "--data",
- "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
- "values": [],
- "alias": "input_file"
- },
- {
- "name": "ethosu.num_macs=",
- "description": "Arm Ethos-U55 configuration - the number represents MACs per cycle.",
- "values": [
- "32",
- "64",
- "128",
- "256"
- ],
- "default_value": "256",
- "alias": "mac"
- }
- ]
- }
- },
- {
- "name": "Corstone-300: Cortex-M55+Ethos-U65",
- "description": "Cortex-M55 and Ethos-U65 functional model implementations based on Corstone-300 design for MPS3 board.",
- "annotations": {
- "ip_class": "Ethos-U65",
- "sim_type": "FM",
- "variant": "Cortex-M55+Ethos-U65"
- },
- "commands": {
- "run": [
- "/opt/VHT/VHT_Corstone_SSE-300_Ethos-U65 -a {software.variables:eval_app} {user_params:input_file}@0x90000000 -C {user_params:mac} -C mps3_board.telnetterminal0.start_telnet=0 -C mps3_board.uart0.out_file='-' -C mps3_board.uart0.shutdown_on_eot=1 -C mps3_board.visualisation.disable-visualisation=1 --stat"
- ]
- },
- "user_params": {
- "run": [
- {
- "name": "--data",
- "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
- "values": [],
- "alias": "input_file"
- },
- {
- "name": "ethosu.num_macs=",
- "description": "Arm Ethos-U65 configuration - the number represents MACs per cycle.",
- "values": [
- "256",
- "512"
- ],
- "default_value": "512",
- "alias": "mac"
- }
- ]
- }
- }
-]
diff --git a/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json.license b/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json b/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json
deleted file mode 100644
index c27c6f5..0000000
--- a/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json
+++ /dev/null
@@ -1,72 +0,0 @@
-[
- {
- "name": "Corstone-300: Cortex-M55+Ethos-U55",
- "description": "Cortex-M55 and Ethos-U55 functional model implementations based on Corstone-300 design for MPS3 board.",
- "annotations": {
- "ip_class": "Ethos-U55",
- "sim_type": "FM",
- "variant": "Cortex-M55+Ethos-U55"
- },
- "commands": {
- "run": [
- "FVP_Corstone_SSE-300_Ethos-U55 -a {software.variables:eval_app} {user_params:input_file}@0x90000000 -C {user_params:mac} -C mps3_board.telnetterminal0.start_telnet=0 -C mps3_board.uart0.out_file='-' -C mps3_board.uart0.shutdown_on_eot=1 -C mps3_board.visualisation.disable-visualisation=1 --stat"
- ]
- },
- "user_params": {
- "run": [
- {
- "name": "--data",
- "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
- "values": [],
- "alias": "input_file"
- },
- {
- "name": "ethosu.num_macs=",
- "description": "Arm Ethos-U55 configuration - the number represents MACs per cycle.",
- "values": [
- "32",
- "64",
- "128",
- "256"
- ],
- "default_value": "256",
- "alias": "mac"
- }
- ]
- }
- },
- {
- "name": "Corstone-300: Cortex-M55+Ethos-U65",
- "description": "Cortex-M55 and Ethos-U65 functional model implementations based on Corstone-300 design for MPS3 board.",
- "annotations": {
- "ip_class": "Ethos-U65",
- "sim_type": "FM",
- "variant": "Cortex-M55+Ethos-U65"
- },
- "commands": {
- "run": [
- "FVP_Corstone_SSE-300_Ethos-U65 -a {software.variables:eval_app} {user_params:input_file}@0x90000000 -C {user_params:mac} -C mps3_board.telnetterminal0.start_telnet=0 -C mps3_board.uart0.out_file='-' -C mps3_board.uart0.shutdown_on_eot=1 -C mps3_board.visualisation.disable-visualisation=1 --stat"
- ]
- },
- "user_params": {
- "run": [
- {
- "name": "--data",
- "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
- "values": [],
- "alias": "input_file"
- },
- {
- "name": "ethosu.num_macs=",
- "description": "Arm Ethos-U65 configuration - the number represents MACs per cycle.",
- "values": [
- "256",
- "512"
- ],
- "default_value": "512",
- "alias": "mac"
- }
- ]
- }
- }
-]
diff --git a/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json.license b/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json b/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json
deleted file mode 100644
index dcb105a..0000000
--- a/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json
+++ /dev/null
@@ -1,72 +0,0 @@
-[
- {
- "name": "Corstone-310: Cortex-M85+Ethos-U55",
- "description": "Cortex-M85 and Ethos-U55 functional model implementations based on Corstone-310 design for MPS3 board.",
- "annotations": {
- "ip_class": "Ethos-U55",
- "sim_type": "FM",
- "variant": "Cortex-M85+Ethos-U55"
- },
- "commands": {
- "run": [
- "/opt/VHT/VHT_Corstone_SSE-310 -a {software.variables:eval_app} {user_params:input_file}@0x90000000 -C {user_params:mac} -C mps3_board.telnetterminal0.start_telnet=0 -C mps3_board.uart0.out_file='-' -C mps3_board.uart0.shutdown_on_eot=1 -C mps3_board.visualisation.disable-visualisation=1 --stat"
- ]
- },
- "user_params": {
- "run": [
- {
- "name": "--data",
- "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
- "values": [],
- "alias": "input_file"
- },
- {
- "name": "ethosu.num_macs=",
- "description": "Arm Ethos-U55 configuration - the number represents MACs per cycle.",
- "values": [
- "32",
- "64",
- "128",
- "256"
- ],
- "default_value": "256",
- "alias": "mac"
- }
- ]
- }
- },
- {
- "name": "Corstone-310: Cortex-M85+Ethos-U65",
- "description": "Cortex-M85 and Ethos-U65 functional model implementations based on Corstone-310 design for MPS3 board.",
- "annotations": {
- "ip_class": "Ethos-U65",
- "sim_type": "FM",
- "variant": "Cortex-M85+Ethos-U65"
- },
- "commands": {
- "run": [
- "/opt/VHT/VHT_Corstone_SSE-310_Ethos-U65 -a {software.variables:eval_app} {user_params:input_file}@0x90000000 -C {user_params:mac} -C mps3_board.telnetterminal0.start_telnet=0 -C mps3_board.uart0.out_file='-' -C mps3_board.uart0.shutdown_on_eot=1 -C mps3_board.visualisation.disable-visualisation=1 --stat"
- ]
- },
- "user_params": {
- "run": [
- {
- "name": "--data",
- "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
- "values": [],
- "alias": "input_file"
- },
- {
- "name": "ethosu.num_macs=",
- "description": "Arm Ethos-U65 configuration - the number represents MACs per cycle.",
- "values": [
- "256",
- "512"
- ],
- "default_value": "512",
- "alias": "mac"
- }
- ]
- }
- }
-]
diff --git a/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json.license b/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json b/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json
deleted file mode 100644
index 6f4f89b..0000000
--- a/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json
+++ /dev/null
@@ -1,72 +0,0 @@
-[
- {
- "name": "Corstone-310: Cortex-M85+Ethos-U55",
- "description": "Cortex-M85 and Ethos-U55 functional model implementations based on Corstone-310 design for MPS3 board.",
- "annotations": {
- "ip_class": "Ethos-U55",
- "sim_type": "FM",
- "variant": "Cortex-M85+Ethos-U55"
- },
- "commands": {
- "run": [
- "FVP_Corstone_SSE-310 -a {software.variables:eval_app} {user_params:input_file}@0x90000000 -C {user_params:mac} -C mps3_board.telnetterminal0.start_telnet=0 -C mps3_board.uart0.out_file='-' -C mps3_board.uart0.shutdown_on_eot=1 -C mps3_board.visualisation.disable-visualisation=1 --stat"
- ]
- },
- "user_params": {
- "run": [
- {
- "name": "--data",
- "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
- "values": [],
- "alias": "input_file"
- },
- {
- "name": "ethosu.num_macs=",
- "description": "Arm Ethos-U55 configuration - the number represents MACs per cycle.",
- "values": [
- "32",
- "64",
- "128",
- "256"
- ],
- "default_value": "256",
- "alias": "mac"
- }
- ]
- }
- },
- {
- "name": "Corstone-310: Cortex-M85+Ethos-U65",
- "description": "Cortex-M85 and Ethos-U65 functional model implementations based on Corstone-310 design for MPS3 board.",
- "annotations": {
- "ip_class": "Ethos-U65",
- "sim_type": "FM",
- "variant": "Cortex-M85+Ethos-U65"
- },
- "commands": {
- "run": [
- "FVP_Corstone_SSE-310_Ethos-U65 -a {software.variables:eval_app} {user_params:input_file}@0x90000000 -C {user_params:mac} -C mps3_board.telnetterminal0.start_telnet=0 -C mps3_board.uart0.out_file='-' -C mps3_board.uart0.shutdown_on_eot=1 -C mps3_board.visualisation.disable-visualisation=1 --stat"
- ]
- },
- "user_params": {
- "run": [
- {
- "name": "--data",
- "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
- "values": [],
- "alias": "input_file"
- },
- {
- "name": "ethosu.num_macs=",
- "description": "Arm Ethos-U65 configuration - the number represents MACs per cycle.",
- "values": [
- "256",
- "512"
- ],
- "default_value": "512",
- "alias": "mac"
- }
- ]
- }
- }
-]
diff --git a/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json.license b/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/src/mlia/resources/backends/applications/APPLICATIONS.txt b/src/mlia/resources/backends/applications/APPLICATIONS.txt
index ca1003b..dd7ffdd 100644
--- a/src/mlia/resources/backends/applications/APPLICATIONS.txt
+++ b/src/mlia/resources/backends/applications/APPLICATIONS.txt
@@ -1,7 +1,5 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
SPDX-License-Identifier: Apache-2.0
This directory contains the application packages for the Generic Inference
Runner.
-
-Each package should contain its own backend-config.json file.
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U55-Default-noTA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U55-Default-noTA/backend-config.json
deleted file mode 100644
index 4d8c928..0000000
--- a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U55-Default-noTA/backend-config.json
+++ /dev/null
@@ -1,14 +0,0 @@
-[
- {
- "name": "Generic Inference Runner: Ethos-U55",
- "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
- "supported_systems": [
- {
- "name": "Corstone-300: Cortex-M55+Ethos-U55"
- }
- ],
- "variables": {
- "eval_app": "{software.config_dir}/ethos-u-inference_runner.axf"
- }
- }
-]
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U55-Default-noTA/backend-config.json.license b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U55-Default-noTA/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U55-Default-noTA/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U65-Default-noTA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U65-Default-noTA/backend-config.json
deleted file mode 100644
index 22ba2d9..0000000
--- a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U65-Default-noTA/backend-config.json
+++ /dev/null
@@ -1,14 +0,0 @@
-[
- {
- "name": "Generic Inference Runner: Ethos-U65",
- "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
- "supported_systems": [
- {
- "name": "Corstone-300: Cortex-M55+Ethos-U65"
- }
- ],
- "variables": {
- "eval_app": "{software.config_dir}/ethos-u-inference_runner.axf"
- }
- }
-]
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U65-Default-noTA/backend-config.json.license b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U65-Default-noTA/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.08.02-ethos-U65-Default-noTA/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U55-Default-noTA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U55-Default-noTA/backend-config.json
deleted file mode 100644
index f7ee996..0000000
--- a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U55-Default-noTA/backend-config.json
+++ /dev/null
@@ -1,14 +0,0 @@
-[
- {
- "name": "Generic Inference Runner: Ethos-U55",
- "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
- "supported_systems": [
- {
- "name": "Corstone-310: Cortex-M85+Ethos-U55"
- }
- ],
- "variables": {
- "eval_app": "{software.config_dir}/ethos-u-inference_runner.axf"
- }
- }
-]
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U55-Default-noTA/backend-config.json.license b/src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U55-Default-noTA/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U55-Default-noTA/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U65-Default-noTA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U65-Default-noTA/backend-config.json
deleted file mode 100644
index 21d8239..0000000
--- a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U65-Default-noTA/backend-config.json
+++ /dev/null
@@ -1,14 +0,0 @@
-[
- {
- "name": "Generic Inference Runner: Ethos-U65",
- "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
- "supported_systems": [
- {
- "name": "Corstone-310: Cortex-M85+Ethos-U65"
- }
- ],
- "variables": {
- "eval_app": "{software.config_dir}/ethos-u-inference_runner.axf"
- }
- }
-]
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U65-Default-noTA/backend-config.json.license b/src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U65-Default-noTA/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.08.02-ethos-U65-Default-noTA/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/src/mlia/resources/backends/systems/.gitignore b/src/mlia/resources/backends/systems/.gitignore
deleted file mode 100644
index 0226166..0000000
--- a/src/mlia/resources/backends/systems/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-# Ignore everything in this directory
-*
-# Except this file
-!.gitignore
diff --git a/src/mlia/target/cortex_a/__init__.py b/src/mlia/target/cortex_a/__init__.py
index 9b0e611..f686bfc 100644
--- a/src/mlia/target/cortex_a/__init__.py
+++ b/src/mlia/target/cortex_a/__init__.py
@@ -1,7 +1,7 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Cortex-A target module."""
from mlia.target.registry import registry
from mlia.target.registry import TargetInfo
-registry.register("Cortex-A", TargetInfo(["ArmNNTFLiteDelegate"]))
+registry.register("cortex-a", TargetInfo(["ArmNNTFLiteDelegate"]))
diff --git a/src/mlia/target/ethos_u/__init__.py b/src/mlia/target/ethos_u/__init__.py
index 3c92ae5..d53be53 100644
--- a/src/mlia/target/ethos_u/__init__.py
+++ b/src/mlia/target/ethos_u/__init__.py
@@ -1,8 +1,8 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Ethos-U target module."""
from mlia.target.registry import registry
from mlia.target.registry import TargetInfo
-registry.register("Ethos-U55", TargetInfo(["Vela", "Corstone-300", "Corstone-310"]))
-registry.register("Ethos-U65", TargetInfo(["Vela", "Corstone-300", "Corstone-310"]))
+registry.register("ethos-u55", TargetInfo(["Vela", "Corstone-300", "Corstone-310"]))
+registry.register("ethos-u65", TargetInfo(["Vela", "Corstone-300", "Corstone-310"]))
diff --git a/src/mlia/target/ethos_u/performance.py b/src/mlia/target/ethos_u/performance.py
index 0d791a1..be1a287 100644
--- a/src/mlia/target/ethos_u/performance.py
+++ b/src/mlia/target/ethos_u/performance.py
@@ -11,20 +11,17 @@ from typing import Union
import mlia.backend.vela.compiler as vela_comp
import mlia.backend.vela.performance as vela_perf
-from mlia.backend.corstone.performance import DeviceInfo
+from mlia.backend.corstone import is_corstone_backend
from mlia.backend.corstone.performance import estimate_performance
-from mlia.backend.corstone.performance import ModelInfo
-from mlia.backend.registry import get_supported_backends
from mlia.core.context import Context
from mlia.core.performance import PerformanceEstimator
from mlia.nn.tensorflow.config import get_tflite_model
from mlia.nn.tensorflow.config import ModelConfiguration
from mlia.nn.tensorflow.optimizations.select import OptimizationSettings
from mlia.target.ethos_u.config import EthosUConfiguration
-from mlia.target.registry import is_supported
+from mlia.target.registry import supported_backends
from mlia.utils.logging import log_action
-
logger = logging.getLogger(__name__)
@@ -186,14 +183,11 @@ class CorstonePerformanceEstimator(
model_path, self.device.compiler_options, optimized_model_path
)
- model_info = ModelInfo(model_path=optimized_model_path)
- device_info = DeviceInfo(
- device_type=self.device.target, # type: ignore
- mac=self.device.mac,
- )
-
corstone_perf_metrics = estimate_performance(
- model_info, device_info, self.backend
+ self.device.target,
+ self.device.mac,
+ optimized_model_path,
+ self.backend,
)
return NPUCycles(
@@ -222,11 +216,12 @@ class EthosUPerformanceEstimator(
self.device = device
if backends is None:
backends = ["Vela"] # Only Vela is always available as default
+ ethos_u_backends = supported_backends(device.target)
for backend in backends:
- if backend != "Vela" and not is_supported(backend):
+ if backend != "Vela" and backend not in ethos_u_backends:
raise ValueError(
f"Unsupported backend '{backend}'. "
- f"Only 'Vela' and {get_supported_backends()} "
+ f"Only 'Vela' and {ethos_u_backends} "
"are supported."
)
self.backends = set(backends)
@@ -241,12 +236,11 @@ class EthosUPerformanceEstimator(
memory_usage = None
npu_cycles = None
-
for backend in self.backends:
if backend == "Vela":
vela_estimator = VelaPerformanceEstimator(self.context, self.device)
memory_usage = vela_estimator.estimate(tflite_model)
- elif backend in get_supported_backends():
+ elif is_corstone_backend(backend):
corstone_estimator = CorstonePerformanceEstimator(
self.context, self.device, backend
)
diff --git a/src/mlia/target/registry.py b/src/mlia/target/registry.py
index 325dd04..4870fc8 100644
--- a/src/mlia/target/registry.py
+++ b/src/mlia/target/registry.py
@@ -3,12 +3,9 @@
"""Target module."""
from __future__ import annotations
-from typing import cast
-
from mlia.backend.config import BackendType
-from mlia.backend.manager import DefaultInstallationManager
+from mlia.backend.manager import get_installation_manager
from mlia.backend.registry import registry as backend_registry
-from mlia.cli.config import get_installation_manager
from mlia.core.common import AdviceCategory
from mlia.core.reporting import Column
from mlia.core.reporting import Table
@@ -65,14 +62,23 @@ def supported_targets(advice: AdviceCategory) -> list[str]:
]
+def all_supported_backends() -> set[str]:
+ """Return set of all supported backends by all targets."""
+ return {
+ backend
+ for item in registry.items.values()
+ for backend in item.supported_backends
+ }
+
+
def table() -> Table:
"""Get a table representation of registered targets with backends."""
def get_status(backend: str) -> str:
if backend_registry.items[backend].type == BackendType.BUILTIN:
return BackendType.BUILTIN.name
- mgr = cast(DefaultInstallationManager, get_installation_manager())
- return "INSTALLED" if mgr.already_installed(backend) else "NOT INSTALLED"
+ mgr = get_installation_manager()
+ return "INSTALLED" if mgr.backend_installed(backend) else "NOT INSTALLED"
def get_advice(target: str) -> tuple[str, str, str]:
supported = supported_advice(target)
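A minimal sketch of the new all_supported_backends helper, assuming only the registry API shown in this hunk:

from mlia.target.registry import all_supported_backends

# Union of the backends declared by every registered target
# (e.g. Vela, Corstone-300/310, tosa-checker, ArmNNTFLiteDelegate).
backends = all_supported_backends()
print(sorted(backends))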
diff --git a/src/mlia/target/tosa/__init__.py b/src/mlia/target/tosa/__init__.py
index 33c9cf2..06bf1a9 100644
--- a/src/mlia/target/tosa/__init__.py
+++ b/src/mlia/target/tosa/__init__.py
@@ -1,7 +1,7 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""TOSA target module."""
from mlia.target.registry import registry
from mlia.target.registry import TargetInfo
-registry.register("TOSA", TargetInfo(["TOSA-Checker"]))
+registry.register("tosa", TargetInfo(["tosa-checker"]))
diff --git a/src/mlia/utils/proc.py b/src/mlia/utils/proc.py
new file mode 100644
index 0000000..d11bfc5
--- /dev/null
+++ b/src/mlia/utils/proc.py
@@ -0,0 +1,55 @@
+# SPDX-FileCopyrightText: Copyright 2023, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Module for process management."""
+from __future__ import annotations
+
+import logging
+import subprocess # nosec
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Callable
+from typing import Generator
+
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass(frozen=True)
+class Command:
+ """Command information."""
+
+ cmd: list[str]
+ cwd: Path = Path.cwd()
+ env: dict[str, str] | None = None
+
+
+def command_output(command: Command) -> Generator[str, None, None]:
+ """Get command output."""
+ logger.debug("Running command: %s", command)
+
+ with subprocess.Popen( # nosec
+ command.cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.STDOUT,
+ universal_newlines=True,
+ bufsize=1,
+ cwd=command.cwd,
+ env=command.env,
+ ) as process:
+ yield from process.stdout or []
+
+ if process.returncode:
+ raise subprocess.CalledProcessError(process.returncode, command.cmd)
+
+
+OutputConsumer = Callable[[str], None]
+
+
+def process_command_output(
+ command: Command,
+ consumers: list[OutputConsumer],
+) -> None:
+ """Execute command and process output."""
+ for line in command_output(command):
+ for consumer in consumers:
+ consumer(line)
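A minimal usage sketch of the process helpers defined in this new module, assuming only the API added above; the echoed command is an illustrative example:

from pathlib import Path
from mlia.utils.proc import Command, process_command_output

lines = []
command = Command(cmd=["echo", "hello"], cwd=Path.cwd())
# Every line of command output is passed to each registered consumer.
process_command_output(command, [lines.append])
print(lines)  # ['hello\n']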
diff --git a/tests/conftest.py b/tests/conftest.py
index 67549e7..d797869 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -2,9 +2,7 @@
# SPDX-License-Identifier: Apache-2.0
"""Pytest conf module."""
import shutil
-import tarfile
from pathlib import Path
-from typing import Any
from typing import Generator
import pytest
@@ -31,18 +29,6 @@ def fixture_sample_context(tmpdir: str) -> ExecutionContext:
@pytest.fixture(scope="session")
-def test_systems_path(test_resources_path: Path) -> Path:
- """Return test systems path in a pytest fixture."""
- return test_resources_path / "backends" / "systems"
-
-
-@pytest.fixture(scope="session")
-def test_applications_path(test_resources_path: Path) -> Path:
- """Return test applications path in a pytest fixture."""
- return test_resources_path / "backends" / "applications"
-
-
-@pytest.fixture(scope="session")
def non_optimised_input_model_file(test_tflite_model: Path) -> Path:
"""Provide the path to a quantized test model file."""
return test_tflite_model
@@ -60,66 +46,6 @@ def invalid_input_model_file(test_tflite_invalid_model: Path) -> Path:
return test_tflite_invalid_model
-@pytest.fixture(autouse=True)
-def test_resources(monkeypatch: pytest.MonkeyPatch, test_resources_path: Path) -> Any:
- """Force using test resources as middleware's repository."""
-
- def get_test_resources() -> Path:
- """Return path to the test resources."""
- return test_resources_path / "backends"
-
- monkeypatch.setattr(
- "mlia.backend.executor.fs.get_backend_resources", get_test_resources
- )
- yield
-
-
-def create_archive(
- archive_name: str, source: Path, destination: Path, with_root_folder: bool = False
-) -> None:
- """Create archive from directory source."""
- with tarfile.open(destination / archive_name, mode="w:gz") as tar:
- for item in source.iterdir():
- item_name = item.name
- if with_root_folder:
- item_name = f"{source.name}/{item_name}"
- tar.add(item, item_name)
-
-
-def process_directory(source: Path, destination: Path) -> None:
- """Process resource directory."""
- destination.mkdir()
-
- for item in source.iterdir():
- if item.is_dir():
- create_archive(f"{item.name}.tar.gz", item, destination)
- create_archive(f"{item.name}_dir.tar.gz", item, destination, True)
-
-
-@pytest.fixture(scope="session", autouse=True)
-def add_archives(
- test_resources_path: Path, tmp_path_factory: pytest.TempPathFactory
-) -> Any:
- """Generate archives of the test resources."""
- tmp_path = tmp_path_factory.mktemp("archives")
-
- archives_path = tmp_path / "archives"
- archives_path.mkdir()
-
- if (archives_path_link := test_resources_path / "archives").is_symlink():
- archives_path_link.unlink()
-
- archives_path_link.symlink_to(archives_path, target_is_directory=True)
-
- for item in ["applications", "systems"]:
- process_directory(test_resources_path / "backends" / item, archives_path / item)
-
- yield
-
- archives_path_link.unlink()
- shutil.rmtree(tmp_path)
-
-
def get_test_keras_model() -> tf.keras.Model:
"""Return test Keras model."""
model = tf.keras.Sequential(
diff --git a/tests/test_backend_corstone.py b/tests/test_backend_corstone.py
new file mode 100644
index 0000000..29ef084
--- /dev/null
+++ b/tests/test_backend_corstone.py
@@ -0,0 +1,11 @@
+# SPDX-FileCopyrightText: Copyright 2023, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for Corstone backend."""
+from mlia.backend.corstone import is_corstone_backend
+
+
+def test_is_corstone_backend() -> None:
+ """Test function is_corstone_backend."""
+ assert is_corstone_backend("Corstone-300") is True
+ assert is_corstone_backend("Corstone-310") is True
+ assert is_corstone_backend("New backend") is False
diff --git a/tests/test_backend_corstone_install.py b/tests/test_backend_corstone_install.py
index 3b05a49..b9e6569 100644
--- a/tests/test_backend_corstone_install.py
+++ b/tests/test_backend_corstone_install.py
@@ -1,490 +1,63 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Tests for Corstone related installation functions.."""
from __future__ import annotations
-import tarfile
from pathlib import Path
-from typing import Iterable
+from typing import Any
+from unittest.mock import call
from unittest.mock import MagicMock
import pytest
from mlia.backend.corstone.install import Corstone300Installer
-from mlia.backend.corstone.install import get_corstone_300_installation
-from mlia.backend.corstone.install import get_corstone_310_installation
from mlia.backend.corstone.install import get_corstone_installations
-from mlia.backend.corstone.install import PackagePathChecker
-from mlia.backend.corstone.install import StaticPathChecker
-from mlia.backend.executor.runner import BackendRunner
-from mlia.backend.install import BackendInfo
-from mlia.backend.install import BackendInstallation
-from mlia.backend.install import BackendInstaller
-from mlia.backend.install import BackendMetadata
-from mlia.backend.install import CompoundPathChecker
-from mlia.backend.install import DownloadAndInstall
-from mlia.backend.install import InstallFromPath
-from mlia.backend.install import PathChecker
-
-
-@pytest.fixture(name="test_mlia_resources")
-def fixture_test_mlia_resources(
- tmp_path: Path, monkeypatch: pytest.MonkeyPatch
-) -> Path:
- """Redirect MLIA resources resolution to the temp directory."""
- mlia_resources = tmp_path / "resources"
- mlia_resources.mkdir()
-
- monkeypatch.setattr(
- "mlia.backend.install.get_mlia_resources",
- MagicMock(return_value=mlia_resources),
- )
-
- return mlia_resources
-
-
-def get_backend_installation( # pylint: disable=too-many-arguments
- backend_runner_mock: MagicMock = MagicMock(),
- name: str = "test_name",
- description: str = "test_description",
- download_artifact: MagicMock | None = None,
- path_checker: PathChecker = MagicMock(),
- apps_resources: list[str] | None = None,
- system_config: str | None = None,
- backend_installer: BackendInstaller = MagicMock(),
- supported_platforms: list[str] | None = None,
-) -> BackendInstallation:
- """Get backend installation."""
- return BackendInstallation(
- backend_runner=backend_runner_mock,
- metadata=BackendMetadata(
- name=name,
- description=description,
- system_config=system_config or "",
- apps_resources=apps_resources or [],
- fvp_dir_name="sample_dir",
- download_artifact=download_artifact,
- supported_platforms=supported_platforms,
- ),
- path_checker=path_checker,
- backend_installer=backend_installer,
- )
-
-
-@pytest.mark.parametrize(
- "platform, supported_platforms, expected_result",
- [
- ["Linux", ["Linux"], True],
- ["Linux", [], True],
- ["Linux", None, True],
- ["Windows", ["Linux"], False],
- ],
-)
-def test_could_be_installed_depends_on_platform(
- platform: str,
- supported_platforms: list[str] | None,
- expected_result: bool,
- monkeypatch: pytest.MonkeyPatch,
-) -> None:
- """Test that installation could not be installed on unsupported platform."""
- monkeypatch.setattr(
- "mlia.backend.install.platform.system",
- MagicMock(return_value=platform),
- )
- monkeypatch.setattr(
- "mlia.backend.install.all_paths_valid",
- MagicMock(return_value=True),
- )
- backend_runner_mock = MagicMock(spec=BackendRunner)
-
- installation = get_backend_installation(
- backend_runner_mock,
- supported_platforms=supported_platforms,
- )
- assert installation.could_be_installed == expected_result
+from mlia.backend.install import Installation
def test_get_corstone_installations() -> None:
- """Test function get_corstone_installation."""
- installs = get_corstone_installations()
- assert len(installs) == 2
- assert all(isinstance(install, BackendInstallation) for install in installs)
-
-
-def test_backend_installation_metadata_resolving() -> None:
- """Test backend installation metadata resolving."""
- backend_runner_mock = MagicMock(spec=BackendRunner)
- installation = get_backend_installation(backend_runner_mock)
-
- assert installation.name == "test_name"
- assert installation.description == "test_description"
-
- backend_runner_mock.all_installed.return_value = False
- assert installation.already_installed is False
-
- assert installation.could_be_installed is True
-
-
-def test_backend_installation_supported_install_types(tmp_path: Path) -> None:
- """Test supported installation types."""
- installation_no_download_artifact = get_backend_installation()
- assert installation_no_download_artifact.supports(DownloadAndInstall()) is False
-
- installation_with_download_artifact = get_backend_installation(
- download_artifact=MagicMock()
- )
- assert installation_with_download_artifact.supports(DownloadAndInstall()) is True
-
- path_checker_mock = MagicMock(return_value=BackendInfo(tmp_path))
- installation_can_install_from_dir = get_backend_installation(
- path_checker=path_checker_mock
- )
- assert installation_can_install_from_dir.supports(InstallFromPath(tmp_path)) is True
-
- any_installation = get_backend_installation()
- assert any_installation.supports("unknown_install_type") is False # type: ignore
-
-
-def test_backend_installation_install_wrong_type() -> None:
- """Test that operation should fail if wrong install type provided."""
- with pytest.raises(Exception, match="Unable to install wrong_install_type"):
- backend_runner_mock = MagicMock(spec=BackendRunner)
- installation = get_backend_installation(backend_runner_mock)
-
- installation.install("wrong_install_type") # type: ignore
-
-
-def test_backend_installation_install_from_path(
- tmp_path: Path, test_mlia_resources: Path
-) -> None:
- """Test installation from the path."""
- system_config = test_mlia_resources / "example_config.json"
- system_config.touch()
-
- sample_app = test_mlia_resources / "sample_app"
- sample_app.mkdir()
-
- dist_dir = tmp_path / "dist"
- dist_dir.mkdir()
-
- path_checker_mock = MagicMock(return_value=BackendInfo(dist_dir))
-
- backend_runner_mock = MagicMock(spec=BackendRunner)
- installation = get_backend_installation(
- backend_runner_mock=backend_runner_mock,
- path_checker=path_checker_mock,
- apps_resources=[sample_app.name],
- system_config="example_config.json",
- )
-
- assert installation.supports(InstallFromPath(dist_dir)) is True
- installation.install(InstallFromPath(dist_dir))
-
- backend_runner_mock.install_system.assert_called_once()
- backend_runner_mock.install_application.assert_called_once_with(sample_app)
-
-
-@pytest.mark.parametrize("copy_source", [True, False])
-def test_backend_installation_install_from_static_path(
- tmp_path: Path, test_mlia_resources: Path, copy_source: bool
-) -> None:
- """Test installation from the predefined path."""
- system_config = test_mlia_resources / "example_config.json"
- system_config.touch()
-
- custom_system_config = test_mlia_resources / "custom_config.json"
- custom_system_config.touch()
-
- sample_app = test_mlia_resources / "sample_app"
- sample_app.mkdir()
-
- predefined_location = tmp_path / "backend"
- predefined_location.mkdir()
-
- predefined_location_file = predefined_location / "file.txt"
- predefined_location_file.touch()
-
- predefined_location_dir = predefined_location / "folder"
- predefined_location_dir.mkdir()
- nested_file = predefined_location_dir / "nested_file.txt"
- nested_file.touch()
-
- backend_runner_mock = MagicMock(spec=BackendRunner)
-
- def check_install_dir(install_dir: Path) -> None:
- """Check content of the install dir."""
- assert install_dir.is_dir()
- files = list(install_dir.iterdir())
-
- if copy_source:
- assert len(files) == 3
- assert all(install_dir / item in files for item in ["file.txt", "folder"])
- assert (install_dir / "folder/nested_file.txt").is_file()
- else:
- assert len(files) == 1
-
- assert install_dir / "custom_config.json" in files
-
- backend_runner_mock.install_system.side_effect = check_install_dir
-
- installation = get_backend_installation(
- backend_runner_mock=backend_runner_mock,
- path_checker=StaticPathChecker(
- predefined_location,
- ["file.txt"],
- copy_source=copy_source,
- system_config=str(custom_system_config),
- ),
- apps_resources=[sample_app.name],
- system_config="example_config.json",
- )
-
- assert installation.supports(InstallFromPath(predefined_location)) is True
- installation.install(InstallFromPath(predefined_location))
-
- backend_runner_mock.install_system.assert_called_once()
- backend_runner_mock.install_application.assert_called_once_with(sample_app)
-
-
-def create_sample_fvp_archive(tmp_path: Path) -> Path:
- """Create sample FVP tar archive."""
- fvp_archive_dir = tmp_path / "archive"
- fvp_archive_dir.mkdir()
-
- sample_file = fvp_archive_dir / "sample.txt"
- sample_file.write_text("Sample file")
-
- sample_dir = fvp_archive_dir / "sample_dir"
- sample_dir.mkdir()
-
- fvp_archive = tmp_path / "archive.tgz"
- with tarfile.open(fvp_archive, "w:gz") as fvp_archive_tar:
- fvp_archive_tar.add(fvp_archive_dir, arcname=fvp_archive_dir.name)
-
- return fvp_archive
-
-
-def test_backend_installation_download_and_install(
- test_mlia_resources: Path, tmp_path: Path
-) -> None:
- """Test downloading and installation process."""
- fvp_archive = create_sample_fvp_archive(tmp_path)
-
- system_config = test_mlia_resources / "example_config.json"
- system_config.touch()
-
- download_artifact_mock = MagicMock()
- download_artifact_mock.download_to.return_value = fvp_archive
+ """Test function get_corstone_installations."""
+ installations = get_corstone_installations()
+ assert len(installations) == 2
- path_checker = PackagePathChecker(["archive/sample.txt"], "archive/sample_dir")
-
- def installer(_eula_agreement: bool, dist_dir: Path) -> Path:
- """Sample installer."""
- return dist_dir
-
- backend_runner_mock = MagicMock(spec=BackendRunner)
- installation = get_backend_installation(
- backend_runner_mock,
- download_artifact=download_artifact_mock,
- backend_installer=installer,
- path_checker=path_checker,
- system_config="example_config.json",
- )
-
- installation.install(DownloadAndInstall())
-
- backend_runner_mock.install_system.assert_called_once()
-
-
-@pytest.mark.parametrize(
- "dir_content, expected_result",
- [
- [
- ["models/", "file1.txt", "file2.txt"],
- "models",
- ],
- [
- ["file1.txt", "file2.txt"],
- None,
- ],
- [
- ["models/", "file2.txt"],
- None,
- ],
- ],
-)
-def test_corstone_path_checker_valid_path(
- tmp_path: Path, dir_content: list[str], expected_result: str | None
-) -> None:
- """Test Corstone path checker valid scenario."""
- path_checker = PackagePathChecker(["file1.txt", "file2.txt"], "models")
-
- for item in dir_content:
- if item.endswith("/"):
- item_dir = tmp_path / item
- item_dir.mkdir()
- else:
- item_file = tmp_path / item
- item_file.touch()
-
- result = path_checker(tmp_path)
- expected = (
- None if expected_result is None else BackendInfo(tmp_path / expected_result)
- )
-
- assert result == expected
-
-
-@pytest.mark.parametrize("system_config", [None, "system_config"])
-@pytest.mark.parametrize("copy_source", [True, False])
-def test_static_path_checker(
- tmp_path: Path, copy_source: bool, system_config: str | None
-) -> None:
- """Test static path checker."""
- static_checker = StaticPathChecker(
- tmp_path, [], copy_source=copy_source, system_config=system_config
- )
- assert static_checker(tmp_path) == BackendInfo(
- tmp_path, copy_source=copy_source, system_config=system_config
- )
-
-
-def test_static_path_checker_not_valid_path(tmp_path: Path) -> None:
- """Test static path checker should return None if path is not valid."""
- static_checker = StaticPathChecker(tmp_path, ["file.txt"])
- assert static_checker(tmp_path / "backend") is None
-
-
-def test_static_path_checker_not_valid_structure(tmp_path: Path) -> None:
- """Test static path checker should return None if files are missing."""
- static_checker = StaticPathChecker(tmp_path, ["file.txt"])
- assert static_checker(tmp_path) is None
-
- missing_file = tmp_path / "file.txt"
- missing_file.touch()
-
- assert static_checker(tmp_path) == BackendInfo(tmp_path, copy_source=False)
-
-
-def test_compound_path_checker(tmp_path: Path) -> None:
- """Test compound path checker."""
- path_checker_path_valid_path = MagicMock(return_value=BackendInfo(tmp_path))
- path_checker_path_not_valid_path = MagicMock(return_value=None)
-
- checker = CompoundPathChecker(
- path_checker_path_valid_path, path_checker_path_not_valid_path
- )
- assert checker(tmp_path) == BackendInfo(tmp_path)
-
- checker = CompoundPathChecker(path_checker_path_not_valid_path)
- assert checker(tmp_path) is None
+ assert all(isinstance(item, Installation) for item in installations)
@pytest.mark.parametrize(
- "eula_agreement, expected_command",
+ "eula_agreement, expected_calls",
[
- [
- True,
- [
- "./FVP_Corstone_SSE-300.sh",
- "-q",
- "-d",
- "corstone-300",
- ],
- ],
+ [True, [call(["./FVP_Corstone_SSE-300.sh", "-q", "-d", "corstone-300"])]],
[
False,
[
- "./FVP_Corstone_SSE-300.sh",
- "-q",
- "-d",
- "corstone-300",
- "--nointeractive",
- "--i-agree-to-the-contained-eula",
+ call(
+ [
+ "./FVP_Corstone_SSE-300.sh",
+ "-q",
+ "-d",
+ "corstone-300",
+ "--nointeractive",
+ "--i-agree-to-the-contained-eula",
+ ]
+ )
],
],
],
)
-def test_corstone_300_installer(
+def test_corstone_installer(
tmp_path: Path,
monkeypatch: pytest.MonkeyPatch,
eula_agreement: bool,
- expected_command: list[str],
+ expected_calls: Any,
) -> None:
- """Test Corstone-300 installer."""
- command_mock = MagicMock()
+ """Test Corstone 300 installer."""
+ mock_check_call = MagicMock()
monkeypatch.setattr(
- "mlia.backend.corstone.install.subprocess.check_call", command_mock
+ "mlia.backend.corstone.install.subprocess.check_call", mock_check_call
)
- installer = Corstone300Installer()
- result = installer(eula_agreement, tmp_path)
-
- command_mock.assert_called_once_with(expected_command)
- assert result == tmp_path / "corstone-300"
-
-
-@pytest.mark.parametrize(
- "corstone_installation, expected_paths",
- [
- [
- get_corstone_300_installation(),
- {
- "/opt/VHT/VHT_Corstone_SSE-300_Ethos-U55",
- "/opt/VHT/VHT_Corstone_SSE-300_Ethos-U65",
- },
- ],
- [
- get_corstone_310_installation(),
- {
- "/opt/VHT/VHT_Corstone_SSE-310",
- "/opt/VHT/VHT_Corstone_SSE-310_Ethos-U65",
- },
- ],
- ],
-)
-def test_corstone_vht_install(
- corstone_installation: BackendInstallation,
- expected_paths: set,
- monkeypatch: pytest.MonkeyPatch,
-) -> None:
- """Test if Corstone 300/310 could be installed from /opt/VHT."""
-
- def _all_files_exist(paths: Iterable[Path]) -> bool:
- """Check if all files exist."""
- pathset = {item.as_posix() for item in paths}
- return pathset == expected_paths
- create_destination_and_install_mock = MagicMock()
-
- monkeypatch.setattr("mlia.backend.install.all_files_exist", _all_files_exist)
-
- monkeypatch.setattr(
- "mlia.backend.executor.system.get_available_systems", lambda: []
- )
-
- monkeypatch.setattr(
- "mlia.backend.executor.system.create_destination_and_install",
- create_destination_and_install_mock,
- )
-
- corstone_installation.install(InstallFromPath(Path("/opt/VHT")))
-
- create_destination_and_install_mock.assert_called_once()
-
-
-def test_corstone_uninstall(
- monkeypatch: pytest.MonkeyPatch,
-) -> None:
- """Test the uninstall function in Corstone."""
- remove_system_mock = MagicMock()
-
- monkeypatch.setattr(
- "mlia.backend.install.remove_system",
- remove_system_mock,
- )
-
- installation = get_corstone_300_installation()
+ installer = Corstone300Installer()
+ installer(eula_agreement, tmp_path)
- installation.uninstall()
- remove_system_mock.assert_called_once_with("corstone_300")
+ assert mock_check_call.mock_calls == expected_calls
diff --git a/tests/test_backend_corstone_performance.py b/tests/test_backend_corstone_performance.py
index d41062f..2d5b196 100644
--- a/tests/test_backend_corstone_performance.py
+++ b/tests/test_backend_corstone_performance.py
@@ -4,516 +4,167 @@
from __future__ import annotations
import base64
-import json
-from contextlib import ExitStack as does_not_raise
from pathlib import Path
-from typing import Any
+from typing import Generator
from unittest.mock import MagicMock
-from unittest.mock import PropertyMock
import pytest
-from mlia.backend.corstone.performance import BackendRunner
-from mlia.backend.corstone.performance import DeviceInfo
+from mlia.backend.corstone.performance import build_corstone_command
from mlia.backend.corstone.performance import estimate_performance
from mlia.backend.corstone.performance import GenericInferenceOutputParser
-from mlia.backend.corstone.performance import GenericInferenceRunnerEthosU
-from mlia.backend.corstone.performance import get_generic_runner
-from mlia.backend.corstone.performance import ModelInfo
+from mlia.backend.corstone.performance import get_metrics
from mlia.backend.corstone.performance import PerformanceMetrics
-from mlia.backend.executor.application import get_application
-from mlia.backend.executor.execution import ExecutionContext
-from mlia.backend.executor.output_consumer import Base64OutputConsumer
-from mlia.backend.executor.system import get_system
-from mlia.backend.registry import get_supported_backends
-from mlia.target.registry import is_supported
-
+from mlia.backend.errors import BackendExecutionFailed
+from mlia.utils.proc import Command
+
+
+def encode_b64(data: str) -> str:
+ """Encode data in base64 format."""
+ return base64.b64encode(data.encode()).decode()
+
+
+def valid_fvp_output() -> list[str]:
+ """Return valid FVP output that could be succesfully parsed."""
+ json_data = """[
+ {
+ "profiling_group": "Inference",
+ "count": 1,
+ "samples": [
+ {"name": "NPU IDLE", "value": [2]},
+ {"name": "NPU AXI0_RD_DATA_BEAT_RECEIVED", "value": [4]},
+ {"name": "NPU AXI0_WR_DATA_BEAT_WRITTEN", "value": [5]},
+ {"name": "NPU AXI1_RD_DATA_BEAT_RECEIVED", "value": [6]},
+ {"name": "NPU ACTIVE", "value": [1]},
+ {"name": "NPU TOTAL", "value": [3]}
+ ]
+ }
+]"""
-def _mock_encode_b64(data: dict[str, int]) -> str:
- """
- Encode the given data into a mock base64-encoded string of JSON.
+ return [
+ "some output",
+ f"<metrics>{encode_b64(json_data)}</metrics>",
+ "some_output",
+ ]
- This reproduces the base64 encoding done in the Corstone applications.
- JSON example:
+def test_generic_inference_output_parser_success() -> None:
+ """Test successful generic inference output parsing."""
+ output_parser = GenericInferenceOutputParser()
+ for line in valid_fvp_output():
+ output_parser(line)
- ```json
- [{'count': 1,
- 'profiling_group': 'Inference',
- 'samples': [{'name': 'NPU IDLE', 'value': [612]},
- {'name': 'NPU AXI0_RD_DATA_BEAT_RECEIVED', 'value': [165872]},
- {'name': 'NPU AXI0_WR_DATA_BEAT_WRITTEN', 'value': [88712]},
- {'name': 'NPU AXI1_RD_DATA_BEAT_RECEIVED', 'value': [57540]},
- {'name': 'NPU ACTIVE', 'value': [520489]},
- {'name': 'NPU TOTAL', 'value': [521101]}]}]
- ```
- """
- wrapped_data = [
- {
- "count": 1,
- "profiling_group": "Inference",
- "samples": [
- {"name": name, "value": [value]} for name, value in data.items()
- ],
- }
- ]
- json_str = json.dumps(wrapped_data)
- json_bytes = bytearray(json_str, encoding="utf-8")
- json_b64 = base64.b64encode(json_bytes).decode("utf-8")
- tag = Base64OutputConsumer.TAG_NAME
- return f"<{tag}>{json_b64}</{tag}>"
+ assert output_parser.get_metrics() == PerformanceMetrics(1, 2, 3, 4, 5, 6)
@pytest.mark.parametrize(
- "data, is_ready, result, missed_keys",
+ "wrong_fvp_output",
[
- (
- [],
- False,
- {},
- {
- "npu_active_cycles",
- "npu_axi0_rd_data_beat_received",
- "npu_axi0_wr_data_beat_written",
- "npu_axi1_rd_data_beat_received",
- "npu_idle_cycles",
- "npu_total_cycles",
- },
- ),
- (
- ["sample text"],
- False,
- {},
- {
- "npu_active_cycles",
- "npu_axi0_rd_data_beat_received",
- "npu_axi0_wr_data_beat_written",
- "npu_axi1_rd_data_beat_received",
- "npu_idle_cycles",
- "npu_total_cycles",
- },
- ),
- (
- [_mock_encode_b64({"NPU AXI0_RD_DATA_BEAT_RECEIVED": 123})],
- False,
- {"npu_axi0_rd_data_beat_received": 123},
- {
- "npu_active_cycles",
- "npu_axi0_wr_data_beat_written",
- "npu_axi1_rd_data_beat_received",
- "npu_idle_cycles",
- "npu_total_cycles",
- },
- ),
- (
- [
- _mock_encode_b64(
- {
- "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1,
- "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2,
- "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3,
- "NPU ACTIVE": 4,
- "NPU IDLE": 5,
- "NPU TOTAL": 6,
- }
- )
- ],
- True,
- {
- "npu_axi0_rd_data_beat_received": 1,
- "npu_axi0_wr_data_beat_written": 2,
- "npu_axi1_rd_data_beat_received": 3,
- "npu_active_cycles": 4,
- "npu_idle_cycles": 5,
- "npu_total_cycles": 6,
- },
- set(),
- ),
+ [],
+ ["NPU IDLE: 123"],
+ ["<metrics>123</metrics>"],
],
)
-def test_generic_inference_output_parser(
- data: dict[str, int], is_ready: bool, result: dict, missed_keys: set[str]
-) -> None:
- """Test generic runner output parser."""
- parser = GenericInferenceOutputParser()
+def test_generic_inference_output_parser_failure(wrong_fvp_output: list[str]) -> None:
+ """Test unsuccessful generic inference output parsing."""
+ output_parser = GenericInferenceOutputParser()
- for line in data:
- parser.feed(line)
+ for line in wrong_fvp_output:
+ output_parser(line)
- assert parser.is_ready() == is_ready
- assert parser.result == result
- assert parser.missed_keys() == missed_keys
+ with pytest.raises(ValueError, match="Unable to parse output and get metrics"):
+ output_parser.get_metrics()
@pytest.mark.parametrize(
- "device, system, application, backend, expected_error",
+ "backend_path, fvp, target, mac, model, profile, expected_command",
[
- (
- DeviceInfo(device_type="Ethos-U55", mac=32),
- ("Corstone-300: Cortex-M55+Ethos-U55", True),
- ("Generic Inference Runner: Ethos-U55", True),
- "Corstone-300",
- does_not_raise(),
- ),
- (
- DeviceInfo(device_type="Ethos-U55", mac=32),
- ("Corstone-300: Cortex-M55+Ethos-U55", False),
- ("Generic Inference Runner: Ethos-U55", False),
- "Corstone-300",
- pytest.raises(
- Exception,
- match=r"System Corstone-300: Cortex-M55\+Ethos-U55 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="Ethos-U55", mac=32),
- ("Corstone-300: Cortex-M55+Ethos-U55", True),
- ("Generic Inference Runner: Ethos-U55", False),
- "Corstone-300",
- pytest.raises(
- Exception,
- match=r"Application Generic Inference Runner: Ethos-U55 "
- r"for the system Corstone-300: Cortex-M55\+Ethos-U55 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="Ethos-U55", mac=32),
- ("Corstone-310: Cortex-M85+Ethos-U55", True),
- ("Generic Inference Runner: Ethos-U55", True),
- "Corstone-310",
- does_not_raise(),
- ),
- (
- DeviceInfo(device_type="Ethos-U55", mac=32),
- ("Corstone-310: Cortex-M85+Ethos-U55", False),
- ("Generic Inference Runner: Ethos-U55", False),
- "Corstone-310",
- pytest.raises(
- Exception,
- match=r"System Corstone-310: Cortex-M85\+Ethos-U55 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="Ethos-U55", mac=32),
- ("Corstone-310: Cortex-M85+Ethos-U55", True),
- ("Generic Inference Runner: Ethos-U55", False),
- "Corstone-310",
- pytest.raises(
- Exception,
- match=r"Application Generic Inference Runner: Ethos-U55 "
- r"for the system Corstone-310: Cortex-M85\+Ethos-U55 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="Ethos-U65", mac=512),
- ("Corstone-300: Cortex-M55+Ethos-U65", True),
- ("Generic Inference Runner: Ethos-U65", True),
- "Corstone-300",
- does_not_raise(),
- ),
- (
- DeviceInfo(device_type="Ethos-U65", mac=512),
- ("Corstone-300: Cortex-M55+Ethos-U65", False),
- ("Generic Inference Runner: Ethos-U65", False),
- "Corstone-300",
- pytest.raises(
- Exception,
- match=r"System Corstone-300: Cortex-M55\+Ethos-U65 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="Ethos-U65", mac=512),
- ("Corstone-300: Cortex-M55+Ethos-U65", True),
- ("Generic Inference Runner: Ethos-U65", False),
+ [
+ Path("backend_path"),
"Corstone-300",
- pytest.raises(
- Exception,
- match=r"Application Generic Inference Runner: Ethos-U65 "
- r"for the system Corstone-300: Cortex-M55\+Ethos-U65 is not installed",
+ "ethos-u55",
+ 256,
+ Path("model.tflite"),
+ "default",
+ Command(
+ [
+ "backend_path/FVP_Corstone_SSE-300_Ethos-U55",
+ "-a",
+ "apps/backends/applications/"
+ "inference_runner-sse-300-22.08.02-ethos-U55-Default-noTA/"
+ "ethos-u-inference_runner.axf",
+ "--data",
+ "model.tflite@0x90000000",
+ "-C",
+ "ethosu.num_macs=256",
+ "-C",
+ "mps3_board.telnetterminal0.start_telnet=0",
+ "-C",
+ "mps3_board.uart0.out_file='-'",
+ "-C",
+ "mps3_board.uart0.shutdown_on_eot=1",
+ "-C",
+ "mps3_board.visualisation.disable-visualisation=1",
+ "--stat",
+ ]
),
- ),
- (
- DeviceInfo(device_type="Ethos-U65", mac=512),
- ("Corstone-310: Cortex-M85+Ethos-U65", True),
- ("Generic Inference Runner: Ethos-U65", True),
- "Corstone-310",
- does_not_raise(),
- ),
- (
- DeviceInfo(device_type="Ethos-U65", mac=512),
- ("Corstone-310: Cortex-M85+Ethos-U65", False),
- ("Generic Inference Runner: Ethos-U65", False),
- "Corstone-310",
- pytest.raises(
- Exception,
- match=r"System Corstone-310: Cortex-M85\+Ethos-U65 is not installed",
- ),
- ),
- (
- DeviceInfo(device_type="Ethos-U65", mac=512),
- ("Corstone-310: Cortex-M85+Ethos-U65", True),
- ("Generic Inference Runner: Ethos-U65", False),
- "Corstone-310",
- pytest.raises(
- Exception,
- match=r"Application Generic Inference Runner: Ethos-U65 "
- r"for the system Corstone-310: Cortex-M85\+Ethos-U65 is not installed",
- ),
- ),
- (
- DeviceInfo(
- device_type="unknown_device", # type: ignore
- mac=None, # type: ignore
- ),
- ("some_system", False),
- ("some_application", False),
- "some backend",
- pytest.raises(Exception, match="Unsupported device unknown_device"),
- ),
+ ],
],
)
-def test_estimate_performance(
- device: DeviceInfo,
- system: tuple[str, bool],
- application: tuple[str, bool],
- backend: str,
- expected_error: Any,
- test_tflite_model: Path,
- backend_runner: MagicMock,
+def test_build_corstone_command(
+ monkeypatch: pytest.MonkeyPatch,
+ backend_path: Path,
+ fvp: str,
+ target: str,
+ mac: int,
+ model: Path,
+ profile: str,
+ expected_command: Command,
) -> None:
- """Test getting performance estimations."""
- system_name, system_installed = system
- application_name, application_installed = application
-
- backend_runner.is_system_installed.return_value = system_installed
- backend_runner.is_application_installed.return_value = application_installed
-
- mock_context = create_mock_context(
- [
- _mock_encode_b64(
- {
- "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1,
- "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2,
- "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3,
- "NPU ACTIVE": 4,
- "NPU IDLE": 5,
- "NPU TOTAL": 6,
- }
- )
- ]
+ """Test function build_corstone_command."""
+ monkeypatch.setattr(
+ "mlia.backend.corstone.performance.get_mlia_resources", lambda: Path("apps")
)
- backend_runner.run_application.return_value = mock_context
+ command = build_corstone_command(backend_path, fvp, target, mac, model, profile)
+ assert command == expected_command
- with expected_error:
- perf_metrics = estimate_performance(
- ModelInfo(test_tflite_model), device, backend
- )
-
- assert isinstance(perf_metrics, PerformanceMetrics)
- assert perf_metrics == PerformanceMetrics(
- npu_axi0_rd_data_beat_received=1,
- npu_axi0_wr_data_beat_written=2,
- npu_axi1_rd_data_beat_received=3,
- npu_active_cycles=4,
- npu_idle_cycles=5,
- npu_total_cycles=6,
- )
-
- assert backend_runner.is_system_installed.called_once_with(system_name)
- assert backend_runner.is_application_installed.called_once_with(
- application_name, system_name
- )
-
-
-@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
-def test_estimate_performance_insufficient_data(
- backend_runner: MagicMock, test_tflite_model: Path, backend: str
-) -> None:
- """Test that performance could not be estimated when not all data presented."""
- backend_runner.is_system_installed.return_value = True
- backend_runner.is_application_installed.return_value = True
-
- no_total_cycles_output = {
- "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1,
- "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2,
- "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3,
- "NPU ACTIVE": 4,
- "NPU IDLE": 5,
- }
- mock_context = create_mock_context([_mock_encode_b64(no_total_cycles_output)])
-
- backend_runner.run_application.return_value = mock_context
+def test_get_metrics_wrong_fvp() -> None:
+ """Test that command construction should fail for wrong FVP."""
with pytest.raises(
- Exception, match="Unable to get performance metrics, insufficient data"
+ BackendExecutionFailed, match=r"Unable to construct a command line for some_fvp"
):
- device = DeviceInfo(device_type="Ethos-U55", mac=32)
- estimate_performance(ModelInfo(test_tflite_model), device, backend)
-
-
-def create_mock_process(stdout: list[str], stderr: list[str]) -> MagicMock:
- """Mock underlying process."""
- mock_process = MagicMock()
- mock_process.poll.return_value = 0
- type(mock_process).stdout = PropertyMock(return_value=iter(stdout))
- type(mock_process).stderr = PropertyMock(return_value=iter(stderr))
- return mock_process
-
-
-def create_mock_context(stdout: list[str]) -> ExecutionContext:
- """Mock ExecutionContext."""
- ctx = ExecutionContext(
- app=get_application("application_1")[0],
- app_params=[],
- system=get_system("System 1"),
- system_params=[],
- )
- ctx.stdout = bytearray("\n".join(stdout).encode("utf-8"))
- return ctx
-
-
-@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
-def test_estimate_performance_invalid_output(
- test_tflite_model: Path, backend_runner: MagicMock, backend: str
-) -> None:
- """Test estimation could not be done if inference produces unexpected output."""
- backend_runner.is_system_installed.return_value = True
- backend_runner.is_application_installed.return_value = True
-
- mock_context = create_mock_context(["Something", "is", "wrong"])
- backend_runner.run_application.return_value = mock_context
-
- with pytest.raises(Exception, match="Unable to get performance metrics"):
- estimate_performance(
- ModelInfo(test_tflite_model),
- DeviceInfo(device_type="Ethos-U55", mac=256),
- backend=backend,
+ get_metrics(
+ Path("backend_path"),
+ "some_fvp",
+ "ethos-u55",
+ 256,
+ Path("model.tflite"),
)
-@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
-def test_get_generic_runner(backend: str) -> None:
- """Test function get_generic_runner()."""
- device_info = DeviceInfo("Ethos-U55", 256)
-
- runner = get_generic_runner(device_info=device_info, backend=backend)
- assert isinstance(runner, GenericInferenceRunnerEthosU)
-
- with pytest.raises(RuntimeError):
- get_generic_runner(device_info=device_info, backend="UNKNOWN_BACKEND")
-
-
-@pytest.mark.parametrize(
- ("backend", "device_type"),
- (
- ("Corstone-300", "Ethos-U55"),
- ("Corstone-300", "Ethos-U65"),
- ("Corstone-310", "Ethos-U55"),
- ("ArmNNTFLiteDelegate", "Cortex-A"),
- ("TOSA-Checker", "TOSA"),
- ("Corstone-300", None),
- ),
-)
-def test_backend_support(backend: str, device_type: str) -> None:
- """Test backend & device support."""
- assert is_supported(backend)
- assert is_supported(backend, device_type)
-
- assert backend in get_supported_backends()
-
-
-class TestGenericInferenceRunnerEthosU:
- """Test for the class GenericInferenceRunnerEthosU."""
+def test_estimate_performance(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Test function estimate_performance."""
+ mock_repository = MagicMock()
+ mock_repository.get_backend_settings.return_value = Path("backend_path"), {
+ "profile": "default"
+ }
- @staticmethod
- @pytest.mark.parametrize(
- "device, backend, expected_system, expected_app",
- [
- [
- DeviceInfo("Ethos-U55", 256),
- "Corstone-300",
- "Corstone-300: Cortex-M55+Ethos-U55",
- "Generic Inference Runner: Ethos-U55",
- ],
- [
- DeviceInfo("Ethos-U65", 256),
- "Corstone-300",
- "Corstone-300: Cortex-M55+Ethos-U65",
- "Generic Inference Runner: Ethos-U65",
- ],
- [
- DeviceInfo("Ethos-U55", 256),
- "Corstone-310",
- "Corstone-310: Cortex-M85+Ethos-U55",
- "Generic Inference Runner: Ethos-U55",
- ],
- [
- DeviceInfo("Ethos-U65", 256),
- "Corstone-310",
- "Corstone-310: Cortex-M85+Ethos-U65",
- "Generic Inference Runner: Ethos-U65",
- ],
- ],
+ monkeypatch.setattr(
+ "mlia.backend.corstone.performance.get_backend_repository",
+ lambda: mock_repository,
)
- def test_artifact_resolver(
- device: DeviceInfo, backend: str, expected_system: str, expected_app: str
- ) -> None:
- """Test artifact resolving based on the provided parameters."""
- generic_runner = get_generic_runner(device, backend)
- assert isinstance(generic_runner, GenericInferenceRunnerEthosU)
-
- assert generic_runner.system_name == expected_system
- assert generic_runner.app_name == expected_app
-
- @staticmethod
- def test_artifact_resolver_unsupported_backend() -> None:
- """Test that it should be not possible to use unsupported backends."""
- with pytest.raises(
- RuntimeError, match="Unsupported device Ethos-U65 for backend test_backend"
- ):
- get_generic_runner(DeviceInfo("Ethos-U65", 256), "test_backend")
-
- @staticmethod
- @pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
- def test_inference_should_fail_if_system_not_installed(
- backend_runner: MagicMock, test_tflite_model: Path, backend: str
- ) -> None:
- """Test that inference should fail if system is not installed."""
- backend_runner.is_system_installed.return_value = False
- generic_runner = get_generic_runner(DeviceInfo("Ethos-U55", 256), backend)
- with pytest.raises(
- Exception,
- match=r"System Corstone-3[01]0: Cortex-M[58]5\+Ethos-U55 is not installed",
- ):
- generic_runner.run(ModelInfo(test_tflite_model), [])
+ def command_output_mock(_command: Command) -> Generator[str, None, None]:
+ """Mock FVP output."""
+ yield from valid_fvp_output()
- @staticmethod
- @pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
- def test_inference_should_fail_is_apps_not_installed(
- backend_runner: MagicMock, test_tflite_model: Path, backend: str
- ) -> None:
- """Test that inference should fail if apps are not installed."""
- backend_runner.is_system_installed.return_value = True
- backend_runner.is_application_installed.return_value = False
+ monkeypatch.setattr("mlia.utils.proc.command_output", command_output_mock)
- generic_runner = get_generic_runner(DeviceInfo("Ethos-U55", 256), backend)
- with pytest.raises(
- Exception,
- match="Application Generic Inference Runner: Ethos-U55"
- r" for the system Corstone-3[01]0: Cortex-M[58]5\+Ethos-U55 is not "
- r"installed",
- ):
- generic_runner.run(ModelInfo(test_tflite_model), [])
-
-
-@pytest.fixture(name="backend_runner")
-def fixture_backend_runner(monkeypatch: pytest.MonkeyPatch) -> MagicMock:
- """Mock backend runner."""
- backend_runner_mock = MagicMock(spec=BackendRunner)
- monkeypatch.setattr(
- "mlia.backend.corstone.performance.get_backend_runner",
- MagicMock(return_value=backend_runner_mock),
+ result = estimate_performance(
+ "ethos-u55", 256, Path("model.tflite"), "Corstone-300"
)
- return backend_runner_mock
+ assert result == PerformanceMetrics(1, 2, 3, 4, 5, 6)
+
+ mock_repository.get_backend_settings.assert_called_once()
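For readers skimming the patch: a minimal usage sketch of the reworked Corstone performance API that the tests above exercise. Call signatures, argument order and the module path are taken from the test code in this hunk; everything else (notably that a Corstone-300 backend is installed and registered) is an assumption, not something the patch itself shows.

from pathlib import Path

from mlia.backend.corstone.performance import build_corstone_command
from mlia.backend.corstone.performance import estimate_performance

# Compose the FVP command line for an Ethos-U55 with 256 MACs; the returned
# Command wraps the FVP binary plus the generic inference runner application.
command = build_corstone_command(
    Path("backend_path"),   # directory containing the installed FVP binaries
    "Corstone-300",         # FVP / backend name
    "ethos-u55",            # target
    256,                    # number of MACs
    Path("model.tflite"),   # model to profile
    "default",              # backend profile setting
)

# Run the model on the FVP and collect the NPU counters from its output;
# returns a PerformanceMetrics instance with the six counters checked above.
metrics = estimate_performance("ethos-u55", 256, Path("model.tflite"), "Corstone-300")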
diff --git a/tests/test_backend_executor_application.py b/tests/test_backend_executor_application.py
deleted file mode 100644
index 8962a0a..0000000
--- a/tests/test_backend_executor_application.py
+++ /dev/null
@@ -1,422 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Tests for the application backend."""
-from __future__ import annotations
-
-from collections import Counter
-from contextlib import ExitStack as does_not_raise
-from pathlib import Path
-from typing import Any
-from unittest.mock import MagicMock
-
-import pytest
-
-from mlia.backend.executor.application import Application
-from mlia.backend.executor.application import get_application
-from mlia.backend.executor.application import (
- get_available_application_directory_names,
-)
-from mlia.backend.executor.application import get_available_applications
-from mlia.backend.executor.application import get_unique_application_names
-from mlia.backend.executor.application import install_application
-from mlia.backend.executor.application import load_applications
-from mlia.backend.executor.application import remove_application
-from mlia.backend.executor.common import Command
-from mlia.backend.executor.common import Param
-from mlia.backend.executor.common import UserParamConfig
-from mlia.backend.executor.config import ApplicationConfig
-from mlia.backend.executor.config import ExtendedApplicationConfig
-from mlia.backend.executor.config import NamedExecutionConfig
-
-
-def test_get_available_application_directory_names() -> None:
- """Test get_available_applicationss mocking get_resources."""
- directory_names = get_available_application_directory_names()
- assert Counter(directory_names) == Counter(
- [
- "application1",
- "application2",
- "application4",
- "application5",
- "application6",
- ]
- )
-
-
-def test_get_available_applications() -> None:
- """Test get_available_applicationss mocking get_resources."""
- available_applications = get_available_applications()
-
- assert all(isinstance(s, Application) for s in available_applications)
- assert all(s != 42 for s in available_applications)
- assert len(available_applications) == 10
-    # application_5 has multiple items with multiple supported systems
- assert [str(s) for s in available_applications] == [
- "application_1",
- "application_2",
- "application_4",
- "application_5",
- "application_5",
- "application_5A",
- "application_5A",
- "application_5B",
- "application_5B",
- "application_6",
- ]
-
-
-def test_get_unique_application_names() -> None:
- """Test get_unique_application_names."""
- unique_names = get_unique_application_names()
-
- assert all(isinstance(s, str) for s in unique_names)
- assert all(s for s in unique_names)
- assert sorted(unique_names) == [
- "application_1",
- "application_2",
- "application_4",
- "application_5",
- "application_5A",
- "application_5B",
- "application_6",
- ]
-
-
-def test_get_application() -> None:
- """Test get_application mocking get_resoures."""
- application = get_application("application_1")
- if len(application) != 1:
- pytest.fail("Unable to get application")
- assert application[0].name == "application_1"
-
- application = get_application("unknown application")
- assert len(application) == 0
-
-
-@pytest.mark.parametrize(
- "source, call_count, expected_exception",
- (
- (
- "archives/applications/application1.tar.gz",
- 0,
- pytest.raises(
- Exception, match=r"Applications \[application_1\] are already installed"
- ),
- ),
- (
- "various/applications/application_with_empty_config",
- 0,
- pytest.raises(Exception, match="No application definition found"),
- ),
- (
- "various/applications/application_with_wrong_config1",
- 0,
- pytest.raises(Exception, match="Unable to read application definition"),
- ),
- (
- "various/applications/application_with_wrong_config2",
- 0,
- pytest.raises(Exception, match="Unable to read application definition"),
- ),
- (
- "various/applications/application_with_wrong_config3",
- 0,
- pytest.raises(Exception, match="Unable to read application definition"),
- ),
- ("various/applications/application_with_valid_config", 1, does_not_raise()),
- (
- "archives/applications/application3.tar.gz",
- 0,
- pytest.raises(Exception, match="Unable to read application definition"),
- ),
- (
- "backends/applications/application1",
- 0,
- pytest.raises(
- Exception, match=r"Applications \[application_1\] are already installed"
- ),
- ),
- (
- "backends/applications/application3",
- 0,
- pytest.raises(Exception, match="Unable to read application definition"),
- ),
- ),
-)
-def test_install_application(
- monkeypatch: Any,
- test_resources_path: Path,
- source: str,
- call_count: int,
- expected_exception: Any,
-) -> None:
- """Test application install from archive."""
- mock_create_destination_and_install = MagicMock()
- monkeypatch.setattr(
- "mlia.backend.executor.application.create_destination_and_install",
- mock_create_destination_and_install,
- )
-
- with expected_exception:
- install_application(test_resources_path / source)
- assert mock_create_destination_and_install.call_count == call_count
-
-
-def test_remove_application(monkeypatch: Any) -> None:
- """Test application removal."""
- mock_remove_backend = MagicMock()
- monkeypatch.setattr(
- "mlia.backend.executor.application.remove_backend", mock_remove_backend
- )
-
- remove_application("some_application_directory")
- mock_remove_backend.assert_called_once()
-
-
-def test_application_config_without_commands() -> None:
- """Test application config without commands."""
- config = ApplicationConfig(name="application")
- application = Application(config)
- # pylint: disable=use-implicit-booleaness-not-comparison
- assert application.commands == {}
-
-
-class TestApplication:
- """Test for application class methods."""
-
- def test___eq__(self) -> None:
- """Test overloaded __eq__ method."""
- config = ApplicationConfig(
- # Application
- supported_systems=["system1", "system2"],
- # inherited from Backend
- name="name",
- description="description",
- commands={},
- )
- application1 = Application(config)
- application2 = Application(config) # Identical
- assert application1 == application2
-
- application3 = Application(config) # changed
- # Change one single attribute so not equal, but same Type
- setattr(application3, "supported_systems", ["somewhere/else"])
- assert application1 != application3
-
- # different Type
- application4 = "Not the Application you are looking for"
- assert application1 != application4
-
- application5 = Application(config)
- # supported systems could be in any order
- setattr(application5, "supported_systems", ["system2", "system1"])
- assert application1 == application5
-
- def test_can_run_on(self) -> None:
- """Test Application can run on."""
- config = ApplicationConfig(name="application", supported_systems=["System-A"])
-
- application = Application(config)
- assert application.can_run_on("System-A")
- assert not application.can_run_on("System-B")
-
- applications = get_application("application_1", "System 1")
- assert len(applications) == 1
- assert applications[0].can_run_on("System 1")
-
- def test_unable_to_create_application_without_name(self) -> None:
- """Test that it is not possible to create application without name."""
- with pytest.raises(Exception, match="Name is empty"):
- Application(ApplicationConfig())
-
- def test_application_config_without_commands(self) -> None:
- """Test application config without commands."""
- config = ApplicationConfig(name="application")
- application = Application(config)
- # pylint: disable=use-implicit-booleaness-not-comparison
- assert application.commands == {}
-
- @pytest.mark.parametrize(
- "config, expected_params",
- (
- (
- ApplicationConfig(
- name="application",
- commands={"command": ["cmd {user_params:0} {user_params:1}"]},
- user_params={
- "command": [
- UserParamConfig(
- name="--param1", description="param1", alias="param1"
- ),
- UserParamConfig(
- name="--param2", description="param2", alias="param2"
- ),
- ]
- },
- ),
- [Param("--param1", "param1"), Param("--param2", "param2")],
- ),
- (
- ApplicationConfig(
- name="application",
- commands={"command": ["cmd {user_params:param1} {user_params:1}"]},
- user_params={
- "command": [
- UserParamConfig(
- name="--param1", description="param1", alias="param1"
- ),
- UserParamConfig(
- name="--param2", description="param2", alias="param2"
- ),
- ]
- },
- ),
- [Param("--param1", "param1"), Param("--param2", "param2")],
- ),
- (
- ApplicationConfig(
- name="application",
- commands={"command": ["cmd {user_params:param1}"]},
- user_params={
- "command": [
- UserParamConfig(
- name="--param1", description="param1", alias="param1"
- ),
- UserParamConfig(
- name="--param2", description="param2", alias="param2"
- ),
- ]
- },
- ),
- [Param("--param1", "param1")],
- ),
- ),
- )
- def test_remove_unused_params(
- self, config: ApplicationConfig, expected_params: list[Param]
- ) -> None:
- """Test mod remove_unused_parameter."""
- application = Application(config)
- application.remove_unused_params()
- assert application.commands["command"].params == expected_params
-
-
-@pytest.mark.parametrize(
- "config, expected_error",
- (
- (
- ExtendedApplicationConfig(name="application"),
- pytest.raises(Exception, match="No supported systems definition provided"),
- ),
- (
- ExtendedApplicationConfig(
- name="application", supported_systems=[NamedExecutionConfig(name="")]
- ),
- pytest.raises(
- Exception,
- match="Unable to read supported system definition, name is missed",
- ),
- ),
- (
- ExtendedApplicationConfig(
- name="application",
- supported_systems=[
- NamedExecutionConfig(
- name="system",
- commands={"command": ["cmd"]},
- user_params={"command": [UserParamConfig(name="param")]},
- )
- ],
- commands={"command": ["cmd {user_params:0}"]},
- user_params={"command": [UserParamConfig(name="param")]},
- ),
- pytest.raises(
- Exception, match="Default parameters for command .* should have aliases"
- ),
- ),
- (
- ExtendedApplicationConfig(
- name="application",
- supported_systems=[
- NamedExecutionConfig(
- name="system",
- commands={"command": ["cmd"]},
- user_params={"command": [UserParamConfig(name="param")]},
- )
- ],
- commands={"command": ["cmd {user_params:0}"]},
- user_params={"command": [UserParamConfig(name="param", alias="param")]},
- ),
- pytest.raises(
- Exception, match="system parameters for command .* should have aliases"
- ),
- ),
- ),
-)
-def test_load_application_exceptional_cases(
- config: ExtendedApplicationConfig, expected_error: Any
-) -> None:
- """Test exceptional cases for application load function."""
- with expected_error:
- load_applications(config)
-
-
-def test_load_application() -> None:
- """Test application load function.
-
- The main purpose of this test is to test configuration for application
- for different systems. All configuration should be correctly
- overridden if needed.
- """
- application_5 = get_application("application_5")
- assert len(application_5) == 2
-
- default_commands = {
- "build": Command(["default build command"]),
- "run": Command(["default run command"]),
- }
- default_variables = {"var1": "value1", "var2": "value2"}
-
- application_5_0 = application_5[0]
- assert application_5_0.supported_systems == ["System 1"]
- assert application_5_0.commands == default_commands
- assert application_5_0.variables == default_variables
-
- application_5_1 = application_5[1]
- assert application_5_1.supported_systems == ["System 2"]
- assert application_5_1.commands == application_5_1.commands
- assert application_5_1.variables == default_variables
-
- application_5a = get_application("application_5A")
- assert len(application_5a) == 2
-
- application_5a_0 = application_5a[0]
- assert application_5a_0.supported_systems == ["System 1"]
- assert application_5a_0.commands == default_commands
- assert application_5a_0.variables == {"var1": "new value1", "var2": "value2"}
-
- application_5a_1 = application_5a[1]
- assert application_5a_1.supported_systems == ["System 2"]
- assert application_5a_1.commands == {
- "build": default_commands["build"],
- "run": Command(["run command on system 2"]),
- }
- assert application_5a_1.variables == {"var1": "value1", "var2": "new value2"}
-
- application_5b = get_application("application_5B")
- assert len(application_5b) == 2
-
- application_5b_0 = application_5b[0]
- assert application_5b_0.supported_systems == ["System 1"]
- assert application_5b_0.commands == {
- "build": Command(["default build command with value for var1 System1"]),
- "run": Command(["default run command with value for var2 System1"]),
- }
- assert "non_used_command" not in application_5b_0.commands
-
- application_5b_1 = application_5b[1]
- assert application_5b_1.supported_systems == ["System 2"]
- assert application_5b_1.commands == {
- "build": Command(["default build command with value for var1 System2"]),
- "run": Command(["run command on system 2"], []),
- }
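For context on what this patch removes: the deleted tests above pin down the old template-driven application configuration, where command strings carry {user_params:...} placeholders that are resolved against UserParamConfig entries. Below is a minimal sketch of that pattern, using only names taken from the deleted imports; it runs only against the pre-patch mlia.backend.executor package.

from mlia.backend.executor.application import Application
from mlia.backend.executor.common import UserParamConfig
from mlia.backend.executor.config import ApplicationConfig

# One command template referencing a user parameter by alias; the old executor
# substituted "{user_params:param1}" with "--param1 <value>" when building the
# command line.
config = ApplicationConfig(
    name="application",
    commands={"command": ["cmd {user_params:param1}"]},
    user_params={
        "command": [
            UserParamConfig(name="--param1", description="param1", alias="param1"),
        ]
    },
)
application = Application(config)
application.remove_unused_params()  # drops parameters not referenced by any template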
diff --git a/tests/test_backend_executor_common.py b/tests/test_backend_executor_common.py
deleted file mode 100644
index e881462..0000000
--- a/tests/test_backend_executor_common.py
+++ /dev/null
@@ -1,482 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-# pylint: disable=protected-access
-"""Tests for the common backend module."""
-from __future__ import annotations
-
-from contextlib import ExitStack as does_not_raise
-from pathlib import Path
-from typing import Any
-from typing import cast
-from typing import IO
-from typing import List
-from unittest.mock import MagicMock
-
-import pytest
-
-from mlia.backend.executor.application import Application
-from mlia.backend.executor.common import Backend
-from mlia.backend.executor.common import BaseBackendConfig
-from mlia.backend.executor.common import Command
-from mlia.backend.executor.common import ConfigurationException
-from mlia.backend.executor.common import load_config
-from mlia.backend.executor.common import Param
-from mlia.backend.executor.common import parse_raw_parameter
-from mlia.backend.executor.common import remove_backend
-from mlia.backend.executor.config import ApplicationConfig
-from mlia.backend.executor.config import UserParamConfig
-from mlia.backend.executor.execution import ExecutionContext
-from mlia.backend.executor.execution import ParamResolver
-from mlia.backend.executor.system import System
-
-
-@pytest.mark.parametrize(
- "directory_name, expected_exception",
- (
- ("some_dir", does_not_raise()),
- (None, pytest.raises(Exception, match="No directory name provided")),
- ),
-)
-def test_remove_backend(
- monkeypatch: Any, directory_name: str, expected_exception: Any
-) -> None:
- """Test remove_backend function."""
- mock_remove_resource = MagicMock()
- monkeypatch.setattr(
- "mlia.backend.executor.common.remove_resource", mock_remove_resource
- )
-
- with expected_exception:
- remove_backend(directory_name, "applications")
-
-
-@pytest.mark.parametrize(
- "filename, expected_exception",
- (
- ("application_config.json", does_not_raise()),
- (None, pytest.raises(Exception, match="Unable to read config")),
- ),
-)
-def test_load_config(
- filename: str, expected_exception: Any, test_resources_path: Path, monkeypatch: Any
-) -> None:
- """Test load_config."""
- with expected_exception:
- configs: list[Path | IO[bytes] | None] = (
- [None]
- if not filename
- else [
- # Ignore pylint warning as 'with' can't be used inside of a
- # generator expression.
- # pylint: disable=consider-using-with
- open(test_resources_path / filename, "rb"),
- test_resources_path / filename,
- ]
- )
- for config in configs:
- json_mock = MagicMock()
- monkeypatch.setattr("mlia.backend.executor.common.json.load", json_mock)
- load_config(config)
- json_mock.assert_called_once()
-
-
-class TestBackend:
- """Test Backend class."""
-
- def test___repr__(self) -> None:
- """Test the representation of Backend instance."""
- backend = Backend(
- BaseBackendConfig(name="Testing name", description="Testing description")
- )
- assert str(backend) == "Testing name"
-
- def test__eq__(self) -> None:
- """Test equality method with different cases."""
- backend1 = Backend(BaseBackendConfig(name="name", description="description"))
- backend1.commands = {"command": Command(["command"])}
-
- backend2 = Backend(BaseBackendConfig(name="name", description="description"))
- backend2.commands = {"command": Command(["command"])}
-
- backend3 = Backend(
- BaseBackendConfig(
- name="Ben", description="This is not the Backend you are looking for"
- )
- )
- backend3.commands = {"wave": Command(["wave hand"])}
-
- backend4 = "Foo" # checking not isinstance(backend4, Backend)
-
- assert backend1 == backend2
- assert backend1 != backend3
- assert backend1 != backend4
-
- @pytest.mark.parametrize(
- "parameter, valid",
- [
- ("--choice-param value_1", True),
- ("--choice-param wrong_value", False),
- ("--open-param something", True),
- ("--wrong-param value", False),
- ],
- )
- def test_validate_parameter(
- self, parameter: str, valid: bool, test_resources_path: Path
- ) -> None:
- """Test validate_parameter."""
- config = cast(
- List[ApplicationConfig],
- load_config(test_resources_path / "hello_world.json"),
- )
- # The application configuration is a list of configurations so we need
- # only the first one
-        # Exercise the validate_parameter test using the Application class, which
- # inherits from Backend.
- application = Application(config[0])
- assert application.validate_parameter("run", parameter) == valid
-
- def test_validate_parameter_with_invalid_command(
- self, test_resources_path: Path
- ) -> None:
- """Test validate_parameter with an invalid command_name."""
- config = cast(
- List[ApplicationConfig],
- load_config(test_resources_path / "hello_world.json"),
- )
- application = Application(config[0])
- with pytest.raises(AttributeError) as err:
- # command foo does not exist, so raise an error
- application.validate_parameter("foo", "bar")
- assert "Unknown command: 'foo'" in str(err.value)
-
- def test_build_command(self) -> None:
- """Test command building."""
- config = {
- "name": "test",
- "commands": {
- "build": ["build {user_params:0} {user_params:1}"],
- "run": ["run {user_params:0}"],
- "post_run": ["post_run {application_params:0} on {system_params:0}"],
- "some_command": ["Command with {variables:var_A}"],
- "empty_command": [""],
- },
- "user_params": {
- "build": [
- {
- "name": "choice_param_0=",
- "values": [1, 2, 3],
- "default_value": 1,
- },
- {"name": "choice_param_1", "values": [3, 4, 5], "default_value": 3},
- {"name": "choice_param_3", "values": [6, 7, 8]},
- ],
- "run": [{"name": "flag_param_0"}],
- },
- "variables": {"var_A": "value for variable A"},
- }
-
- application, system = Application(config), System(config) # type: ignore
- context = ExecutionContext(
- app=application,
- app_params=[],
- system=system,
- system_params=[],
- )
-
- param_resolver = ParamResolver(context)
-
- cmd = application.build_command(
- "build", ["choice_param_0=2", "choice_param_1=4"], param_resolver
- )
- assert cmd == ["build choice_param_0=2 choice_param_1 4"]
-
- cmd = application.build_command("build", ["choice_param_0=2"], param_resolver)
- assert cmd == ["build choice_param_0=2 choice_param_1 3"]
-
- cmd = application.build_command(
- "build", ["choice_param_0=2", "choice_param_3=7"], param_resolver
- )
- assert cmd == ["build choice_param_0=2 choice_param_1 3"]
-
- with pytest.raises(
- ConfigurationException, match="Command 'foo' could not be found."
- ):
- application.build_command("foo", [""], param_resolver)
-
- cmd = application.build_command("some_command", [], param_resolver)
- assert cmd == ["Command with value for variable A"]
-
- cmd = application.build_command("empty_command", [], param_resolver)
- assert cmd == [""]
-
- @pytest.mark.parametrize("class_", [Application, System])
- def test_build_command_unknown_variable(self, class_: type) -> None:
- """Test that unable to construct backend with unknown variable."""
- with pytest.raises(Exception, match="Unknown variable var1"):
- config = {"name": "test", "commands": {"run": ["run {variables:var1}"]}}
- class_(config)
-
- @pytest.mark.parametrize(
- "class_, config, expected_output",
- [
- (
- Application,
- {
- "name": "test",
- "commands": {
- "build": ["build {user_params:0} {user_params:1}"],
- "run": ["run {user_params:0}"],
- },
- "user_params": {
- "build": [
- {
- "name": "choice_param_0=",
- "values": ["a", "b", "c"],
- "default_value": "a",
- "alias": "param_1",
- },
- {
- "name": "choice_param_1",
- "values": ["a", "b", "c"],
- "default_value": "a",
- "alias": "param_2",
- },
- {"name": "choice_param_3", "values": ["a", "b", "c"]},
- ],
- "run": [{"name": "flag_param_0"}],
- },
- },
- [
- (
- "b",
- Param(
- name="choice_param_0=",
- description="",
- values=["a", "b", "c"],
- default_value="a",
- alias="param_1",
- ),
- ),
- (
- "a",
- Param(
- name="choice_param_1",
- description="",
- values=["a", "b", "c"],
- default_value="a",
- alias="param_2",
- ),
- ),
- (
- "c",
- Param(
- name="choice_param_3",
- description="",
- values=["a", "b", "c"],
- ),
- ),
- ],
- ),
- (System, {"name": "test"}, []),
- ],
- )
- def test_resolved_parameters(
- self,
- class_: type,
- config: dict,
- expected_output: list[tuple[str | None, Param]],
- ) -> None:
- """Test command building."""
- backend = class_(config)
-
- params = backend.resolved_parameters(
- "build", ["choice_param_0=b", "choice_param_3=c"]
- )
- assert params == expected_output
-
- @pytest.mark.parametrize(
- ["param_name", "user_param", "expected_value"],
- [
- (
- "test_name",
- "test_name=1234",
- "1234",
- ), # optional parameter using '='
- (
- "test_name",
- "test_name 1234",
- "1234",
- ), # optional parameter using ' '
- ("test_name", "test_name", None), # flag
- (None, "test_name=1234", "1234"), # positional parameter
- ],
- )
- def test_resolved_user_parameters(
- self, param_name: str, user_param: str, expected_value: str
- ) -> None:
- """Test different variants to provide user parameters."""
- # A sample config providing one backend config
- config = {
- "name": "test_backend",
- "commands": {
- "test": ["user_param:test_param"],
- },
- "user_params": {
- "test": [UserParamConfig(name=param_name, alias="test_name")],
- },
- }
- backend = Backend(cast(BaseBackendConfig, config))
- params = backend.resolved_parameters(
- command_name="test", user_params=[user_param]
- )
- assert len(params) == 1
- value, param = params[0]
- assert param_name == param.name
- assert expected_value == value
-
- @pytest.mark.parametrize(
- "input_param,expected",
- [
- ("--param=1", ("--param", "1")),
- ("--param 1", ("--param", "1")),
- ("--flag", ("--flag", None)),
- ],
- )
- def test__parse_raw_parameter(
- self, input_param: str, expected: tuple[str, str | None]
- ) -> None:
- """Test internal method of parsing a single raw parameter."""
- assert parse_raw_parameter(input_param) == expected
-
-
-class TestParam:
- """Test Param class."""
-
- def test__eq__(self) -> None:
- """Test equality method with different cases."""
- param1 = Param(name="test", description="desc", values=["values"])
- param2 = Param(name="test", description="desc", values=["values"])
- param3 = Param(name="test1", description="desc", values=["values"])
- param4 = object()
-
- assert param1 == param2
- assert param1 != param3
- assert param1 != param4
-
- def test_get_details(self) -> None:
- """Test get_details() method."""
- param1 = Param(name="test", description="desc", values=["values"])
- assert param1.get_details() == {
- "name": "test",
- "values": ["values"],
- "description": "desc",
- }
-
- def test_invalid(self) -> None:
- """Test invalid use cases for the Param class."""
- with pytest.raises(
- ConfigurationException,
- match="Either name, alias or both must be set to identify a parameter.",
- ):
- Param(name=None, description="desc", values=["values"])
-
-
-class TestCommand:
- """Test Command class."""
-
- def test_get_details(self) -> None:
- """Test get_details() method."""
- param1 = Param(name="test", description="desc", values=["values"])
- command1 = Command(command_strings=["echo test"], params=[param1])
- assert command1.get_details() == {
- "command_strings": ["echo test"],
- "user_params": [
- {"name": "test", "values": ["values"], "description": "desc"}
- ],
- }
-
- def test__eq__(self) -> None:
- """Test equality method with different cases."""
- param1 = Param("test", "desc", ["values"])
- param2 = Param("test1", "desc1", ["values1"])
- command1 = Command(command_strings=["echo test"], params=[param1])
- command2 = Command(command_strings=["echo test"], params=[param1])
- command3 = Command(command_strings=["echo test"])
- command4 = Command(command_strings=["echo test"], params=[param2])
- command5 = object()
-
- assert command1 == command2
- assert command1 != command3
- assert command1 != command4
- assert command1 != command5
-
- @pytest.mark.parametrize(
- "params, expected_error",
- [
- [[], does_not_raise()],
- [[Param("param", "param description", [])], does_not_raise()],
- [
- [
- Param("param", "param description", [], None, "alias"),
- Param("param", "param description", [], None),
- ],
- does_not_raise(),
- ],
- [
- [
- Param("param1", "param1 description", [], None, "alias1"),
- Param("param2", "param2 description", [], None, "alias2"),
- ],
- does_not_raise(),
- ],
- [
- [
- Param("param", "param description", [], None, "alias"),
- Param("param", "param description", [], None, "alias"),
- ],
- pytest.raises(ConfigurationException, match="Non-unique aliases alias"),
- ],
- [
- [
- Param("alias", "param description", [], None, "alias1"),
- Param("param", "param description", [], None, "alias"),
- ],
- pytest.raises(
- ConfigurationException,
- match="Aliases .* could not be used as parameter name",
- ),
- ],
- [
- [
- Param("alias", "param description", [], None, "alias"),
- Param("param1", "param1 description", [], None, "alias1"),
- ],
- does_not_raise(),
- ],
- [
- [
- Param("alias", "param description", [], None, "alias"),
- Param("alias", "param1 description", [], None, "alias1"),
- ],
- pytest.raises(
- ConfigurationException,
- match="Aliases .* could not be used as parameter name",
- ),
- ],
- [
- [
- Param("param1", "param1 description", [], None, "alias1"),
- Param("param2", "param2 description", [], None, "alias1"),
- Param("param3", "param3 description", [], None, "alias2"),
- Param("param4", "param4 description", [], None, "alias2"),
- ],
- pytest.raises(
- ConfigurationException, match="Non-unique aliases alias1, alias2"
- ),
- ],
- ],
- )
- def test_validate_params(self, params: list[Param], expected_error: Any) -> None:
- """Test command validation function."""
- with expected_error:
- Command([], params)
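Similarly, the removed parse_raw_parameter helper (exercised by TestBackend.test__parse_raw_parameter above) accepted three parameter forms. A quick reference sketch, runnable only against the pre-patch mlia.backend.executor.common module:

from mlia.backend.executor.common import parse_raw_parameter

# The three accepted raw-parameter forms and how they split.
assert parse_raw_parameter("--param=1") == ("--param", "1")  # '=' separated value
assert parse_raw_parameter("--param 1") == ("--param", "1")  # space separated value
assert parse_raw_parameter("--flag") == ("--flag", None)     # flag without a value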
diff --git a/tests/test_backend_executor_execution.py b/tests/test_backend_executor_execution.py
deleted file mode 100644
index 6a6ea08..0000000
--- a/tests/test_backend_executor_execution.py
+++ /dev/null
@@ -1,212 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Test backend execution module."""
-from pathlib import Path
-from typing import Any
-from unittest.mock import MagicMock
-
-import pytest
-
-from mlia.backend.executor.application import Application
-from mlia.backend.executor.common import UserParamConfig
-from mlia.backend.executor.config import ApplicationConfig
-from mlia.backend.executor.config import SystemConfig
-from mlia.backend.executor.execution import ExecutionContext
-from mlia.backend.executor.execution import get_application_and_system
-from mlia.backend.executor.execution import get_application_by_name_and_system
-from mlia.backend.executor.execution import ParamResolver
-from mlia.backend.executor.execution import run_application
-from mlia.backend.executor.system import load_system
-
-
-def test_context_param_resolver(tmpdir: Any) -> None:
- """Test parameter resolving."""
- system_config_location = Path(tmpdir) / "system"
- system_config_location.mkdir()
-
- application_config_location = Path(tmpdir) / "application"
- application_config_location.mkdir()
-
- ctx = ExecutionContext(
- app=Application(
- ApplicationConfig(
- name="test_application",
- description="Test application",
- config_location=application_config_location,
- commands={
- "run": [
- "run_command1 {user_params:0}",
- "run_command2 {user_params:1}",
- ]
- },
- variables={"var_1": "value for var_1"},
- user_params={
- "run": [
- UserParamConfig(
- name="--param1",
- description="Param 1",
- default_value="123",
- alias="param_1",
- ),
- UserParamConfig(
- name="--param2", description="Param 2", default_value="456"
- ),
- UserParamConfig(
- name="--param3", description="Param 3", alias="param_3"
- ),
- UserParamConfig(
- name="--param4=",
- description="Param 4",
- default_value="456",
- alias="param_4",
- ),
- UserParamConfig(
- description="Param 5",
- default_value="789",
- alias="param_5",
- ),
- ]
- },
- )
- ),
- app_params=["--param2=789"],
- system=load_system(
- SystemConfig(
- name="test_system",
- description="Test system",
- config_location=system_config_location,
- commands={
- "build": ["build_command1 {user_params:0}"],
- "run": ["run_command {application.commands.run:1}"],
- },
- variables={"var_1": "value for var_1"},
- user_params={
- "build": [
- UserParamConfig(
- name="--param1", description="Param 1", default_value="aaa"
- ),
- UserParamConfig(name="--param2", description="Param 2"),
- ]
- },
- )
- ),
- system_params=["--param1=bbb"],
- )
-
- param_resolver = ParamResolver(ctx)
- expected_values = {
- "application.name": "test_application",
- "application.description": "Test application",
- "application.config_dir": str(application_config_location),
- "application.commands.run:0": "run_command1 --param1 123",
- "application.commands.run.params:0": "123",
- "application.commands.run.params:param_1": "123",
- "application.commands.run:1": "run_command2 --param2 789",
- "application.commands.run.params:1": "789",
- "application.variables:var_1": "value for var_1",
- "system.name": "test_system",
- "system.description": "Test system",
- "system.config_dir": str(system_config_location),
- "system.commands.build:0": "build_command1 --param1 bbb",
- "system.commands.run:0": "run_command run_command2 --param2 789",
- "system.commands.build.params:0": "bbb",
- "system.variables:var_1": "value for var_1",
- }
-
- for param, value in expected_values.items():
- assert param_resolver(param) == value
-
- expected_errors = {
- "application.variables:var_2": pytest.raises(
- Exception, match="Unknown variable var_2"
- ),
- "application.commands.clean:0": pytest.raises(
- Exception, match="Command clean not found"
- ),
- "application.commands.run:2": pytest.raises(
- Exception, match="Invalid index 2 for command run"
- ),
- "application.commands.run.params:5": pytest.raises(
- Exception, match="Invalid parameter index 5 for command run"
- ),
- "application.commands.run.params:param_2": pytest.raises(
- Exception,
- match="No value for parameter with index or alias param_2 of command run",
- ),
- "UNKNOWN": pytest.raises(
- Exception, match="Unable to resolve parameter UNKNOWN"
- ),
- "system.commands.build.params:1": pytest.raises(
- Exception,
- match="No value for parameter with index or alias 1 of command build",
- ),
- "system.commands.build:A": pytest.raises(
- Exception, match="Bad command index A"
- ),
- "system.variables:var_2": pytest.raises(
- Exception, match="Unknown variable var_2"
- ),
- }
- for param, error in expected_errors.items():
- with error:
- param_resolver(param)
-
- resolved_params = ctx.app.resolved_parameters("run", [])
- expected_user_params = {
- "user_params:0": "--param1 123",
- "user_params:param_1": "--param1 123",
- "user_params:2": "--param3",
- "user_params:param_3": "--param3",
- "user_params:3": "--param4=456",
- "user_params:param_4": "--param4=456",
- "user_params:param_5": "789",
- }
- for param, expected_value in expected_user_params.items():
- assert param_resolver(param, "run", resolved_params) == expected_value
-
- with pytest.raises(
- Exception, match="Invalid index 5 for user params of command run"
- ):
- param_resolver("user_params:5", "run", resolved_params)
-
- with pytest.raises(
- Exception, match="No user parameter for command 'run' with alias 'param_2'."
- ):
- param_resolver("user_params:param_2", "run", resolved_params)
-
- with pytest.raises(Exception, match="Unable to resolve user params"):
- param_resolver("user_params:0", "", resolved_params)
-
-
-def test_get_application_by_name_and_system(monkeypatch: Any) -> None:
- """Test exceptional case for get_application_by_name_and_system."""
- monkeypatch.setattr(
- "mlia.backend.executor.execution.get_application",
- MagicMock(return_value=[MagicMock(), MagicMock()]),
- )
-
- with pytest.raises(
- ValueError,
- match="Error during getting application test_application for the "
- "system test_system",
- ):
- get_application_by_name_and_system("test_application", "test_system")
-
-
-def test_get_application_and_system(monkeypatch: Any) -> None:
- """Test exceptional case for get_application_and_system."""
- monkeypatch.setattr(
- "mlia.backend.executor.execution.get_system", MagicMock(return_value=None)
- )
-
- with pytest.raises(ValueError, match="System test_system is not found"):
- get_application_and_system("test_application", "test_system")
-
-
-def test_run_application() -> None:
- """Test function run_application."""
- ctx = run_application("application_4", [], "System 4", [])
-
- assert isinstance(ctx, ExecutionContext)
- assert ctx.stderr is not None and not ctx.stderr.decode()
- assert ctx.stdout is not None and ctx.stdout.decode().strip() == "application_4"
diff --git a/tests/test_backend_executor_fs.py b/tests/test_backend_executor_fs.py
deleted file mode 100644
index 298b8db..0000000
--- a/tests/test_backend_executor_fs.py
+++ /dev/null
@@ -1,138 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Module for testing fs.py."""
-from __future__ import annotations
-
-from contextlib import ExitStack as does_not_raise
-from pathlib import Path
-from typing import Any
-from unittest.mock import MagicMock
-
-import pytest
-
-from mlia.backend.executor.fs import get_backends_path
-from mlia.backend.executor.fs import recreate_directory
-from mlia.backend.executor.fs import remove_directory
-from mlia.backend.executor.fs import remove_resource
-from mlia.backend.executor.fs import ResourceType
-from mlia.backend.executor.fs import valid_for_filename
-
-
-@pytest.mark.parametrize(
- "resource_name,expected_path",
- [
- ("systems", does_not_raise()),
- ("applications", does_not_raise()),
- ("whaaat", pytest.raises(ResourceWarning)),
- (None, pytest.raises(ResourceWarning)),
- ],
-)
-def test_get_backends_path(resource_name: ResourceType, expected_path: Any) -> None:
- """Test get_resources() with multiple parameters."""
- with expected_path:
- resource_path = get_backends_path(resource_name)
- assert resource_path.exists()
-
-
-def test_remove_resource_wrong_directory(
- monkeypatch: Any, test_applications_path: Path
-) -> None:
- """Test removing resource with wrong directory."""
- mock_get_resources = MagicMock(return_value=test_applications_path)
- monkeypatch.setattr(
- "mlia.backend.executor.fs.get_backends_path", mock_get_resources
- )
-
- mock_shutil_rmtree = MagicMock()
- monkeypatch.setattr("mlia.backend.executor.fs.shutil.rmtree", mock_shutil_rmtree)
-
- with pytest.raises(Exception, match="Resource .* does not exist"):
- remove_resource("unknown", "applications")
- mock_shutil_rmtree.assert_not_called()
-
- with pytest.raises(Exception, match="Wrong resource .*"):
- remove_resource("readme.txt", "applications")
- mock_shutil_rmtree.assert_not_called()
-
-
-def test_remove_resource(monkeypatch: Any, test_applications_path: Path) -> None:
- """Test removing resource data."""
- mock_get_resources = MagicMock(return_value=test_applications_path)
- monkeypatch.setattr(
- "mlia.backend.executor.fs.get_backends_path", mock_get_resources
- )
-
- mock_shutil_rmtree = MagicMock()
- monkeypatch.setattr("mlia.backend.executor.fs.shutil.rmtree", mock_shutil_rmtree)
-
- remove_resource("application1", "applications")
- mock_shutil_rmtree.assert_called_once()
-
-
-def test_remove_directory(tmpdir: Any) -> None:
- """Test directory removal."""
- tmpdir_path = Path(tmpdir)
- tmpfile = tmpdir_path / "temp.txt"
-
- for item in [None, tmpfile]:
- with pytest.raises(Exception, match="No directory path provided"):
- remove_directory(item)
-
- newdir = tmpdir_path / "newdir"
- newdir.mkdir()
-
- assert newdir.is_dir()
- remove_directory(newdir)
- assert not newdir.exists()
-
-
-def test_recreate_directory(tmpdir: Any) -> None:
- """Test directory recreation."""
- with pytest.raises(Exception, match="No directory path provided"):
- recreate_directory(None)
-
- tmpdir_path = Path(tmpdir)
- tmpfile = tmpdir_path / "temp.txt"
- tmpfile.touch()
- with pytest.raises(Exception, match="Path .* does exist and it is not a directory"):
- recreate_directory(tmpfile)
-
- newdir = tmpdir_path / "newdir"
- newdir.mkdir()
- newfile = newdir / "newfile"
- newfile.touch()
- assert list(newdir.iterdir()) == [newfile]
- recreate_directory(newdir)
- assert not list(newdir.iterdir())
-
- newdir2 = tmpdir_path / "newdir2"
- assert not newdir2.exists()
- recreate_directory(newdir2)
- assert newdir2.is_dir()
-
-
-def write_to_file(
- write_directory: Any, write_mode: str, write_text: str | bytes
-) -> Path:
- """Write some text to a temporary test file."""
- tmpdir_path = Path(write_directory)
- tmpfile = tmpdir_path / "file_name.txt"
- with open(tmpfile, write_mode) as file: # pylint: disable=unspecified-encoding
- file.write(write_text)
- return tmpfile
-
-
-@pytest.mark.parametrize(
- "value, replacement, expected_result",
- [
- ["", "", ""],
- ["123", "", "123"],
- ["123", "_", "123"],
- ["/some_folder/some_script.sh", "", "some_foldersome_script.sh"],
- ["/some_folder/some_script.sh", "_", "_some_folder_some_script.sh"],
- ["!;'some_name$%^!", "_", "___some_name____"],
- ],
-)
-def test_valid_for_filename(value: str, replacement: str, expected_result: str) -> None:
- """Test function valid_for_filename."""
- assert valid_for_filename(value, replacement) == expected_result
diff --git a/tests/test_backend_executor_output_consumer.py b/tests/test_backend_executor_output_consumer.py
deleted file mode 100644
index 537084f..0000000
--- a/tests/test_backend_executor_output_consumer.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Tests for the output parsing."""
-from __future__ import annotations
-
-import base64
-import json
-from typing import Any
-
-import pytest
-
-from mlia.backend.executor.output_consumer import Base64OutputConsumer
-from mlia.backend.executor.output_consumer import OutputConsumer
-
-
-OUTPUT_MATCH_ALL = bytearray(
- """
-String1: My awesome string!
-String2: STRINGS_ARE_GREAT!!!
-Int: 12
-Float: 3.14
-""",
- encoding="utf-8",
-)
-
-OUTPUT_NO_MATCH = bytearray(
- """
-This contains no matches...
-Test1234567890!"£$%^&*()_+@~{}[]/.,<>?|
-""",
- encoding="utf-8",
-)
-
-OUTPUT_PARTIAL_MATCH = bytearray(
- "String1: My awesome string!",
- encoding="utf-8",
-)
-
-REGEX_CONFIG = {
- "FirstString": {"pattern": r"String1.*: (.*)", "type": "str"},
- "SecondString": {"pattern": r"String2.*: (.*)!!!", "type": "str"},
- "IntegerValue": {"pattern": r"Int.*: (.*)", "type": "int"},
- "FloatValue": {"pattern": r"Float.*: (.*)", "type": "float"},
-}
-
-EMPTY_REGEX_CONFIG: dict[str, dict[str, Any]] = {}
-
-EXPECTED_METRICS_ALL = {
- "FirstString": "My awesome string!",
- "SecondString": "STRINGS_ARE_GREAT",
- "IntegerValue": 12,
- "FloatValue": 3.14,
-}
-
-EXPECTED_METRICS_PARTIAL = {
- "FirstString": "My awesome string!",
-}
-
-
-@pytest.mark.parametrize(
- "expected_metrics",
- [
- EXPECTED_METRICS_ALL,
- EXPECTED_METRICS_PARTIAL,
- ],
-)
-def test_base64_output_consumer(expected_metrics: dict) -> None:
- """
- Make sure the Base64OutputConsumer yields valid results.
-
- I.e. return an empty dict if either the input or the config is empty and
- return the parsed metrics otherwise.
- """
- parser = Base64OutputConsumer()
- assert isinstance(parser, OutputConsumer)
-
- def create_base64_output(expected_metrics: dict) -> bytearray:
- json_str = json.dumps(expected_metrics, indent=4)
- json_b64 = base64.b64encode(json_str.encode("utf-8"))
- return (
- OUTPUT_MATCH_ALL # Should not be matched by the Base64OutputConsumer
- + f"<{Base64OutputConsumer.TAG_NAME}>".encode()
- + bytearray(json_b64)
- + f"</{Base64OutputConsumer.TAG_NAME}>".encode()
- + OUTPUT_NO_MATCH # Just to add some difficulty...
- )
-
- output = create_base64_output(expected_metrics)
-
- consumed = False
- for line in output.splitlines():
- if parser.feed(line.decode("utf-8")):
- consumed = True
- assert consumed # we should have consumed at least one line
-
- res = parser.parsed_output
- assert len(res) == 1
- assert isinstance(res, list)
- for val in res:
- assert val == expected_metrics
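The deleted output-consumer tests above construct the payload format by hand; the same wrapping can be reproduced in a few lines. This is a sketch against the pre-patch output_consumer module, and the metric name is only example data:

import base64
import json

from mlia.backend.executor.output_consumer import Base64OutputConsumer

# Base64-encoded JSON wrapped in the consumer's tag, matching what the
# deleted tests feed to the parser.
payload = base64.b64encode(json.dumps({"NPU TOTAL": 6}).encode("utf-8")).decode("utf-8")
line = f"<{Base64OutputConsumer.TAG_NAME}>{payload}</{Base64OutputConsumer.TAG_NAME}>"

parser = Base64OutputConsumer()
parser.feed(line)            # truthy when the line carried a payload
print(parser.parsed_output)  # -> [{'NPU TOTAL': 6}]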
diff --git a/tests/test_backend_executor_proc.py b/tests/test_backend_executor_proc.py
deleted file mode 100644
index e8caf8a..0000000
--- a/tests/test_backend_executor_proc.py
+++ /dev/null
@@ -1,190 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-# pylint: disable=attribute-defined-outside-init,not-callable
-"""Pytests for testing mlia/backend/proc.py."""
-from pathlib import Path
-from typing import Any
-from unittest import mock
-
-import pytest
-from sh import ErrorReturnCode
-
-from mlia.backend.executor.proc import Command
-from mlia.backend.executor.proc import CommandFailedException
-from mlia.backend.executor.proc import CommandNotFound
-from mlia.backend.executor.proc import parse_command
-from mlia.backend.executor.proc import print_command_stdout
-from mlia.backend.executor.proc import run_and_wait
-from mlia.backend.executor.proc import ShellCommand
-from mlia.backend.executor.proc import terminate_command
-
-
-class TestShellCommand:
- """Sample class for collecting tests."""
-
- def test_run_ls(self, monkeypatch: Any) -> None:
- """Test a simple ls command."""
- mock_command = mock.MagicMock()
- monkeypatch.setattr(Command, "bake", mock_command)
-
- mock_get_stdout_stderr_paths = mock.MagicMock()
- mock_get_stdout_stderr_paths.return_value = ("/path/std.out", "/path/std.err")
- monkeypatch.setattr(
- ShellCommand, "get_stdout_stderr_paths", mock_get_stdout_stderr_paths
- )
-
- shell_command = ShellCommand()
- shell_command.run("ls", "-l")
- assert mock_command.mock_calls[0] == mock.call(("-l",))
- assert mock_command.mock_calls[1] == mock.call()(
- _bg=True,
- _err="/path/std.err",
- _out="/path/std.out",
- _tee=True,
- _bg_exc=False,
- )
-
- def test_run_command_not_found(self) -> None:
- """Test whe the command doesn't exist."""
- shell_command = ShellCommand()
- with pytest.raises(CommandNotFound):
- shell_command.run("lsl", "-l")
-
- def test_get_stdout_stderr_paths(self) -> None:
- """Test the method to get files to store stdout and stderr."""
- shell_command = ShellCommand()
- out, err = shell_command.get_stdout_stderr_paths("cmd")
- assert out.exists() and out.is_file()
- assert err.exists() and err.is_file()
- assert "cmd" in out.name
- assert "cmd" in err.name
-
-
-@mock.patch("builtins.print")
-def test_print_command_stdout_alive(mock_print: Any) -> None:
- """Test the print command stdout with an alive (running) process."""
- mock_command = mock.MagicMock()
- mock_command.is_alive.return_value = True
- mock_command.next.side_effect = ["test1", "test2", StopIteration]
-
- print_command_stdout(mock_command)
-
- mock_command.assert_has_calls(
- [mock.call.is_alive(), mock.call.next(), mock.call.next()]
- )
- mock_print.assert_has_calls(
- [mock.call("test1", end=""), mock.call("test2", end="")]
- )
-
-
-@mock.patch("builtins.print")
-def test_print_command_stdout_not_alive(mock_print: Any) -> None:
- """Test the print command stdout with a not alive (exited) process."""
- mock_command = mock.MagicMock()
- mock_command.is_alive.return_value = False
- mock_command.stdout = "test"
-
- print_command_stdout(mock_command)
- mock_command.assert_has_calls([mock.call.is_alive()])
- mock_print.assert_called_once_with("test")
-
-
-def test_terminate_command_no_process() -> None:
- """Test command termination when process does not exist."""
- mock_command = mock.MagicMock()
- mock_command.process.signal_group.side_effect = ProcessLookupError()
-
- terminate_command(mock_command)
- mock_command.process.signal_group.assert_called_once()
- mock_command.is_alive.assert_not_called()
-
-
-def test_terminate_command() -> None:
- """Test command termination."""
- mock_command = mock.MagicMock()
- mock_command.is_alive.return_value = False
-
- terminate_command(mock_command)
- mock_command.process.signal_group.assert_called_once()
-
-
-def test_terminate_command_case1() -> None:
- """Test command termination when it takes time.."""
- mock_command = mock.MagicMock()
- mock_command.is_alive.side_effect = [True, True, False]
-
- terminate_command(mock_command, wait_period=0.1)
- mock_command.process.signal_group.assert_called_once()
- assert mock_command.is_alive.call_count == 3
-
-
-def test_terminate_command_case2() -> None:
- """Test command termination when it takes much time.."""
- mock_command = mock.MagicMock()
- mock_command.is_alive.side_effect = [True, True, True]
-
- terminate_command(mock_command, number_of_attempts=3, wait_period=0.1)
- assert mock_command.is_alive.call_count == 3
- assert mock_command.process.signal_group.call_count == 2
-
-
-class TestRunAndWait:
- """Test run_and_wait function."""
-
- @pytest.fixture(autouse=True)
- def setup_method(self, monkeypatch: Any) -> None:
- """Init test method."""
- self.execute_command_mock = mock.MagicMock()
- monkeypatch.setattr(
- "mlia.backend.executor.proc.execute_command", self.execute_command_mock
- )
-
- self.terminate_command_mock = mock.MagicMock()
- monkeypatch.setattr(
- "mlia.backend.executor.proc.terminate_command",
- self.terminate_command_mock,
- )
-
- def test_if_execute_command_raises_exception(self) -> None:
- """Test if execute_command fails."""
- self.execute_command_mock.side_effect = Exception("Error!")
- with pytest.raises(Exception, match="Error!"):
- run_and_wait("command", Path.cwd())
-
- def test_if_command_finishes_with_error(self) -> None:
- """Test if command finishes with error."""
- cmd_mock = mock.MagicMock()
- self.execute_command_mock.return_value = cmd_mock
- exit_code_mock = mock.PropertyMock(
- side_effect=ErrorReturnCode("cmd", bytearray(), bytearray())
- )
- type(cmd_mock).exit_code = exit_code_mock
-
- with pytest.raises(CommandFailedException):
- run_and_wait("command", Path.cwd())
-
- @pytest.mark.parametrize("terminate_on_error, call_count", ((False, 0), (True, 1)))
- def test_if_command_finishes_with_exception(
- self, terminate_on_error: bool, call_count: int
- ) -> None:
- """Test if command finishes with error."""
- cmd_mock = mock.MagicMock()
- self.execute_command_mock.return_value = cmd_mock
- exit_code_mock = mock.PropertyMock(side_effect=Exception("Error!"))
- type(cmd_mock).exit_code = exit_code_mock
-
- with pytest.raises(Exception, match="Error!"):
- run_and_wait("command", Path.cwd(), terminate_on_error=terminate_on_error)
-
- assert self.terminate_command_mock.call_count == call_count
-
-
-def test_parse_command() -> None:
- """Test parse_command function."""
- assert parse_command("1.sh") == ["bash", "1.sh"]
- # The following line raises a B604 bandit error. In our case we specify
- # what shell to use instead of using the default one. It is a safe use
- # we are ignoring this instance.
- assert parse_command("1.sh", shell="sh") == ["sh", "1.sh"] # nosec
- assert parse_command("command") == ["command"]
- assert parse_command("command 123 --param=1") == ["command", "123", "--param=1"]
diff --git a/tests/test_backend_executor_runner.py b/tests/test_backend_executor_runner.py
deleted file mode 100644
index 36c6e5e..0000000
--- a/tests/test_backend_executor_runner.py
+++ /dev/null
@@ -1,254 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Tests for module backend/manager."""
-from __future__ import annotations
-
-from pathlib import Path
-from unittest.mock import MagicMock
-from unittest.mock import PropertyMock
-
-import pytest
-
-from mlia.backend.corstone.performance import BackendRunner
-from mlia.backend.corstone.performance import ExecutionParams
-
-
-class TestBackendRunner:
- """Tests for BackendRunner class."""
-
- @staticmethod
- def _setup_backends(
- monkeypatch: pytest.MonkeyPatch,
- available_systems: list[str] | None = None,
- available_apps: list[str] | None = None,
- ) -> None:
- """Set up backend metadata."""
-
- def mock_system(system: str) -> MagicMock:
- """Mock the System instance."""
- mock = MagicMock()
- type(mock).name = PropertyMock(return_value=system)
- return mock
-
- def mock_app(app: str) -> MagicMock:
- """Mock the Application instance."""
- mock = MagicMock()
- type(mock).name = PropertyMock(return_value=app)
- mock.can_run_on.return_value = True
- return mock
-
- system_mocks = [mock_system(name) for name in (available_systems or [])]
- monkeypatch.setattr(
- "mlia.backend.executor.runner.get_available_systems",
- MagicMock(return_value=system_mocks),
- )
-
- apps_mock = [mock_app(name) for name in (available_apps or [])]
- monkeypatch.setattr(
- "mlia.backend.executor.runner.get_available_applications",
- MagicMock(return_value=apps_mock),
- )
-
- @pytest.mark.parametrize(
- "available_systems, system, installed",
- [
- ([], "system1", False),
- (["system1", "system2"], "system1", True),
- ],
- )
- def test_is_system_installed(
- self,
- available_systems: list,
- system: str,
- installed: bool,
- monkeypatch: pytest.MonkeyPatch,
- ) -> None:
- """Test method is_system_installed."""
- backend_runner = BackendRunner()
-
- self._setup_backends(monkeypatch, available_systems)
-
- assert backend_runner.is_system_installed(system) == installed
-
- @pytest.mark.parametrize(
- "available_systems, systems",
- [
- ([], []),
- (["system1"], ["system1"]),
- ],
- )
- def test_installed_systems(
- self,
- available_systems: list[str],
- systems: list[str],
- monkeypatch: pytest.MonkeyPatch,
- ) -> None:
- """Test method installed_systems."""
- backend_runner = BackendRunner()
-
- self._setup_backends(monkeypatch, available_systems)
- assert backend_runner.get_installed_systems() == systems
-
- @staticmethod
- def test_install_system(monkeypatch: pytest.MonkeyPatch) -> None:
- """Test system installation."""
- install_system_mock = MagicMock()
- monkeypatch.setattr(
- "mlia.backend.executor.runner.install_system", install_system_mock
- )
-
- backend_runner = BackendRunner()
- backend_runner.install_system(Path("test_system_path"))
-
- install_system_mock.assert_called_once_with(Path("test_system_path"))
-
- @pytest.mark.parametrize(
- "available_systems, systems, expected_result",
- [
- ([], [], False),
- (["system1"], [], False),
- (["system1"], ["system1"], True),
- (["system1", "system2"], ["system1", "system3"], False),
- (["system1", "system2"], ["system1", "system2"], True),
- ],
- )
- def test_systems_installed(
- self,
- available_systems: list[str],
- systems: list[str],
- expected_result: bool,
- monkeypatch: pytest.MonkeyPatch,
- ) -> None:
- """Test method systems_installed."""
- self._setup_backends(monkeypatch, available_systems)
-
- backend_runner = BackendRunner()
-
- assert backend_runner.systems_installed(systems) is expected_result
-
- @pytest.mark.parametrize(
- "available_apps, applications, expected_result",
- [
- ([], [], False),
- (["app1"], [], False),
- (["app1"], ["app1"], True),
- (["app1", "app2"], ["app1", "app3"], False),
- (["app1", "app2"], ["app1", "app2"], True),
- ],
- )
- def test_applications_installed(
- self,
- available_apps: list[str],
- applications: list[str],
- expected_result: bool,
- monkeypatch: pytest.MonkeyPatch,
- ) -> None:
- """Test method applications_installed."""
- self._setup_backends(monkeypatch, [], available_apps)
- backend_runner = BackendRunner()
-
- assert backend_runner.applications_installed(applications) is expected_result
-
- @pytest.mark.parametrize(
- "available_apps, applications",
- [
- ([], []),
- (
- ["application1", "application2"],
- ["application1", "application2"],
- ),
- ],
- )
- def test_get_installed_applications(
- self,
- available_apps: list[str],
- applications: list[str],
- monkeypatch: pytest.MonkeyPatch,
- ) -> None:
- """Test method get_installed_applications."""
- self._setup_backends(monkeypatch, [], available_apps)
-
- backend_runner = BackendRunner()
- assert applications == backend_runner.get_installed_applications()
-
- @staticmethod
- def test_install_application(monkeypatch: pytest.MonkeyPatch) -> None:
- """Test application installation."""
- mock_install_application = MagicMock()
- monkeypatch.setattr(
- "mlia.backend.executor.runner.install_application",
- mock_install_application,
- )
-
- backend_runner = BackendRunner()
- backend_runner.install_application(Path("test_application_path"))
- mock_install_application.assert_called_once_with(Path("test_application_path"))
-
- @pytest.mark.parametrize(
- "available_apps, application, installed",
- [
- ([], "system1", False),
- (
- ["application1", "application2"],
- "application1",
- True,
- ),
- (
- [],
- "application1",
- False,
- ),
- ],
- )
- def test_is_application_installed(
- self,
- available_apps: list[str],
- application: str,
- installed: bool,
- monkeypatch: pytest.MonkeyPatch,
- ) -> None:
- """Test method is_application_installed."""
- self._setup_backends(monkeypatch, [], available_apps)
-
- backend_runner = BackendRunner()
- assert installed == backend_runner.is_application_installed(
- application, "system1"
- )
-
- @staticmethod
- @pytest.mark.parametrize(
- "execution_params, expected_command",
- [
- (
- ExecutionParams("application_4", "System 4", [], []),
- ["application_4", [], "System 4", []],
- ),
- (
- ExecutionParams(
- "application_6",
- "System 6",
- ["param1=value2"],
- ["sys-param1=value2"],
- ),
- [
- "application_6",
- ["param1=value2"],
- "System 6",
- ["sys-param1=value2"],
- ],
- ),
- ],
- )
- def test_run_application_local(
- monkeypatch: pytest.MonkeyPatch,
- execution_params: ExecutionParams,
- expected_command: list[str],
- ) -> None:
- """Test method run_application with local systems."""
- run_app = MagicMock()
- monkeypatch.setattr("mlia.backend.executor.runner.run_application", run_app)
-
- backend_runner = BackendRunner()
- backend_runner.run_application(execution_params)
-
- run_app.assert_called_once_with(*expected_command)
diff --git a/tests/test_backend_executor_source.py b/tests/test_backend_executor_source.py
deleted file mode 100644
index 3aa336e..0000000
--- a/tests/test_backend_executor_source.py
+++ /dev/null
@@ -1,205 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Tests for the source backend module."""
-from collections import Counter
-from contextlib import ExitStack as does_not_raise
-from pathlib import Path
-from typing import Any
-from unittest.mock import MagicMock
-from unittest.mock import patch
-
-import pytest
-
-from mlia.backend.executor.common import ConfigurationException
-from mlia.backend.executor.source import create_destination_and_install
-from mlia.backend.executor.source import DirectorySource
-from mlia.backend.executor.source import get_source
-from mlia.backend.executor.source import TarArchiveSource
-
-
-def test_create_destination_and_install(test_systems_path: Path, tmpdir: Any) -> None:
- """Test create_destination_and_install function."""
- system_directory = test_systems_path / "system1"
-
- dir_source = DirectorySource(system_directory)
- resources = Path(tmpdir)
- create_destination_and_install(dir_source, resources)
- assert (resources / "system1").is_dir()
-
-
-@patch(
- "mlia.backend.executor.source.DirectorySource.create_destination",
- return_value=False,
-)
-def test_create_destination_and_install_if_dest_creation_not_required(
- mock_ds_create_destination: Any, tmpdir: Any
-) -> None:
- """Test create_destination_and_install function."""
- dir_source = DirectorySource(Path("unknown"))
- resources = Path(tmpdir)
- with pytest.raises(Exception):
- create_destination_and_install(dir_source, resources)
-
- mock_ds_create_destination.assert_called_once()
-
-
-def test_create_destination_and_install_if_installation_fails(tmpdir: Any) -> None:
- """Test create_destination_and_install function if installation fails."""
- dir_source = DirectorySource(Path("unknown"))
- resources = Path(tmpdir)
- with pytest.raises(Exception, match="Directory .* does not exist"):
- create_destination_and_install(dir_source, resources)
- assert not (resources / "unknown").exists()
- assert resources.exists()
-
-
-def test_create_destination_and_install_if_name_is_empty() -> None:
- """Test create_destination_and_install function fails if source name is empty."""
- source = MagicMock()
- source.create_destination.return_value = True
- source.name.return_value = None
-
- with pytest.raises(Exception, match="Unable to get source name"):
- create_destination_and_install(source, Path("some_path"))
-
- source.install_into.assert_not_called()
-
-
-@pytest.mark.parametrize(
- "source_path, expected_class, expected_error",
- [
- (
- Path("backends/applications/application1/"),
- DirectorySource,
- does_not_raise(),
- ),
- (
- Path("archives/applications/application1.tar.gz"),
- TarArchiveSource,
- does_not_raise(),
- ),
- (
- Path("doesnt/exist"),
- None,
- pytest.raises(
- ConfigurationException, match="Unable to read .*doesnt/exist"
- ),
- ),
- ],
-)
-def test_get_source(
- source_path: Path,
- expected_class: Any,
- expected_error: Any,
- test_resources_path: Path,
-) -> None:
- """Test get_source function."""
- with expected_error:
- full_source_path = test_resources_path / source_path
- source = get_source(full_source_path)
- assert isinstance(source, expected_class)
-
-
-class TestDirectorySource:
- """Test DirectorySource class."""
-
- @pytest.mark.parametrize(
- "directory, name",
- [
- (Path("/some/path/some_system"), "some_system"),
- (Path("some_system"), "some_system"),
- ],
- )
- def test_name(self, directory: Path, name: str) -> None:
- """Test getting source name."""
- assert DirectorySource(directory).name() == name
-
- def test_install_into(self, test_systems_path: Path, tmpdir: Any) -> None:
- """Test install directory into destination."""
- system_directory = test_systems_path / "system1"
-
- dir_source = DirectorySource(system_directory)
- with pytest.raises(Exception, match="Wrong destination .*"):
- dir_source.install_into(Path("unknown_destination"))
-
- tmpdir_path = Path(tmpdir)
- dir_source.install_into(tmpdir_path)
- source_files = [f.name for f in system_directory.iterdir()]
- dest_files = [f.name for f in tmpdir_path.iterdir()]
- assert Counter(source_files) == Counter(dest_files)
-
- def test_install_into_unknown_source_directory(self, tmpdir: Any) -> None:
- """Test install system from unknown directory."""
- with pytest.raises(Exception, match="Directory .* does not exist"):
- DirectorySource(Path("unknown_directory")).install_into(Path(tmpdir))
-
-
-class TestTarArchiveSource:
- """Test TarArchiveSource class."""
-
- @pytest.mark.parametrize(
- "archive, name",
- [
- (Path("some_archive.tgz"), "some_archive"),
- (Path("some_archive.tar.gz"), "some_archive"),
- (Path("some_archive"), "some_archive"),
- ("archives/systems/system1.tar.gz", "system1"),
- ("archives/systems/system1_dir.tar.gz", "system1"),
- ],
- )
- def test_name(self, test_resources_path: Path, archive: Path, name: str) -> None:
- """Test getting source name."""
- assert TarArchiveSource(test_resources_path / archive).name() == name
-
- def test_install_into(self, test_resources_path: Path, tmpdir: Any) -> None:
- """Test install archive into destination."""
- system_archive = test_resources_path / "archives/systems/system1.tar.gz"
-
- tar_source = TarArchiveSource(system_archive)
- with pytest.raises(Exception, match="Wrong destination .*"):
- tar_source.install_into(Path("unknown_destination"))
-
- tmpdir_path = Path(tmpdir)
- tar_source.install_into(tmpdir_path)
- source_files = [
- "backend-config.json.license",
- "backend-config.json",
- "system_artifact",
- ]
- dest_files = [f.name for f in tmpdir_path.iterdir()]
- assert Counter(source_files) == Counter(dest_files)
-
- def test_install_into_unknown_source_archive(self, tmpdir: Any) -> None:
- """Test install unknown source archive."""
- with pytest.raises(Exception, match="File .* does not exist"):
- TarArchiveSource(Path("unknown.tar.gz")).install_into(Path(tmpdir))
-
- def test_install_into_unsupported_source_archive(self, tmpdir: Any) -> None:
- """Test install unsupported file type."""
- plain_text_file = Path(tmpdir) / "test_file"
- plain_text_file.write_text("Not a system config")
-
- with pytest.raises(Exception, match="Unsupported archive type .*"):
- TarArchiveSource(plain_text_file).install_into(Path(tmpdir))
-
- def test_lazy_property_init(self, test_resources_path: Path) -> None:
- """Test that class properties initialized correctly."""
- system_archive = test_resources_path / "archives/systems/system1.tar.gz"
-
- tar_source = TarArchiveSource(system_archive)
- assert tar_source.name() == "system1"
- assert tar_source.config() is not None
- assert tar_source.create_destination()
-
- tar_source = TarArchiveSource(system_archive)
- assert tar_source.config() is not None
- assert tar_source.create_destination()
- assert tar_source.name() == "system1"
-
- def test_create_destination_property(self, test_resources_path: Path) -> None:
- """Test create_destination property filled correctly for different archives."""
- system_archive1 = test_resources_path / "archives/systems/system1.tar.gz"
- system_archive2 = test_resources_path / "archives/systems/system1_dir.tar.gz"
-
- assert TarArchiveSource(system_archive1).create_destination()
- assert not TarArchiveSource(system_archive2).create_destination()
diff --git a/tests/test_backend_executor_system.py b/tests/test_backend_executor_system.py
deleted file mode 100644
index c94ef30..0000000
--- a/tests/test_backend_executor_system.py
+++ /dev/null
@@ -1,358 +0,0 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-"""Tests for system backend."""
-from __future__ import annotations
-
-from contextlib import ExitStack as does_not_raise
-from pathlib import Path
-from typing import Any
-from unittest.mock import MagicMock
-
-import pytest
-
-from mlia.backend.executor.common import Command
-from mlia.backend.executor.common import ConfigurationException
-from mlia.backend.executor.common import Param
-from mlia.backend.executor.common import UserParamConfig
-from mlia.backend.executor.config import SystemConfig
-from mlia.backend.executor.system import get_available_systems
-from mlia.backend.executor.system import get_system
-from mlia.backend.executor.system import install_system
-from mlia.backend.executor.system import load_system
-from mlia.backend.executor.system import remove_system
-from mlia.backend.executor.system import System
-
-
-def test_get_available_systems() -> None:
- """Test get_available_systems mocking get_resources."""
- available_systems = get_available_systems()
- assert all(isinstance(s, System) for s in available_systems)
- assert len(available_systems) == 4
- assert [str(s) for s in available_systems] == [
- "System 1",
- "System 2",
- "System 4",
- "System 6",
- ]
-
-
-def test_get_system() -> None:
- """Test get_system."""
- system1 = get_system("System 1")
- assert isinstance(system1, System)
- assert system1.name == "System 1"
-
- system2 = get_system("System 2")
- # check that comparison with object of another type returns false
- assert system1 != 42
- assert system1 != system2
-
- with pytest.raises(
- ConfigurationException, match="System 'Unknown system' not found."
- ):
- get_system("Unknown system")
-
-
-@pytest.mark.parametrize(
- "source, call_count, exception_type",
- (
- (
- "archives/systems/system1.tar.gz",
- 0,
- pytest.raises(Exception, match="Systems .* are already installed"),
- ),
- (
- "archives/systems/system3.tar.gz",
- 0,
- pytest.raises(Exception, match="Unable to read system definition"),
- ),
- (
- "backends/systems/system1",
- 0,
- pytest.raises(Exception, match="Systems .* are already installed"),
- ),
- (
- "backends/systems/system3",
- 0,
- pytest.raises(Exception, match="Unable to read system definition"),
- ),
- ("unknown_path", 0, pytest.raises(Exception, match="Unable to read")),
- (
- "various/systems/system_with_empty_config",
- 0,
- pytest.raises(Exception, match="No system definition found"),
- ),
- ("various/systems/system_with_valid_config", 1, does_not_raise()),
- ),
-)
-def test_install_system(
- monkeypatch: Any,
- test_resources_path: Path,
- source: str,
- call_count: int,
- exception_type: Any,
-) -> None:
- """Test system installation from archive."""
- mock_create_destination_and_install = MagicMock()
- monkeypatch.setattr(
- "mlia.backend.executor.system.create_destination_and_install",
- mock_create_destination_and_install,
- )
-
- with exception_type:
- install_system(test_resources_path / source)
-
- assert mock_create_destination_and_install.call_count == call_count
-
-
-def test_remove_system(monkeypatch: Any) -> None:
- """Test system removal."""
- mock_remove_backend = MagicMock()
- monkeypatch.setattr(
- "mlia.backend.executor.system.remove_backend", mock_remove_backend
- )
- remove_system("some_system_dir")
- mock_remove_backend.assert_called_once()
-
-
-def test_system() -> None:
- """Test the System class."""
- config = SystemConfig(name="System 1")
- system = System(config)
- assert str(system) == "System 1"
- assert system.name == "System 1"
-
-
-def test_system_with_empty_parameter_name() -> None:
- """Test that configuration fails if parameter name is empty."""
- bad_config = SystemConfig(
- name="System 1",
- commands={"run": ["run"]},
- user_params={"run": [{"name": "", "values": ["1", "2", "3"]}]},
- )
- with pytest.raises(Exception, match="Parameter has an empty 'name' attribute."):
- System(bad_config)
-
-
-def test_system_run() -> None:
- """Test run operation for system."""
- system = get_system("System 4")
- assert isinstance(system, System)
-
- system.run("echo 'application run'")
-
-
-def test_system_start_no_config_location() -> None:
- """Test that system without config location could not start."""
- system = load_system(SystemConfig(name="test"))
-
- assert isinstance(system, System)
- with pytest.raises(
- ConfigurationException, match="System has invalid config location: None"
- ):
- system.run("sleep 100")
-
-
-@pytest.mark.parametrize(
- "config, expected_class, expected_error",
- [
- (
- SystemConfig(name="test"),
- System,
- does_not_raise(),
- ),
- (SystemConfig(), None, pytest.raises(ConfigurationException)),
- ],
-)
-def test_load_system(
- config: SystemConfig, expected_class: type, expected_error: Any
-) -> None:
- """Test load_system function."""
- if not expected_class:
- with expected_error:
- load_system(config)
- else:
- system = load_system(config)
- assert isinstance(system, expected_class)
-
-
-def test_load_system_populate_shared_params() -> None:
- """Test shared parameters population."""
- with pytest.raises(Exception, match="All shared parameters should have aliases"):
- load_system(
- SystemConfig(
- name="test_system",
- user_params={
- "shared": [
- UserParamConfig(
- name="--shared_param1",
- description="Shared parameter",
- values=["1", "2", "3"],
- default_value="1",
- )
- ]
- },
- )
- )
-
- with pytest.raises(
- Exception, match="All parameters for command run should have aliases"
- ):
- load_system(
- SystemConfig(
- name="test_system",
- user_params={
- "shared": [
- UserParamConfig(
- name="--shared_param1",
- description="Shared parameter",
- values=["1", "2", "3"],
- default_value="1",
- alias="shared_param1",
- )
- ],
- "run": [
- UserParamConfig(
- name="--run_param1",
- description="Run specific parameter",
- values=["1", "2", "3"],
- default_value="2",
- )
- ],
- },
- )
- )
- system0 = load_system(
- SystemConfig(
- name="test_system",
- commands={"run": ["run_command"]},
- user_params={
- "shared": [],
- "run": [
- UserParamConfig(
- name="--run_param1",
- description="Run specific parameter",
- values=["1", "2", "3"],
- default_value="2",
- alias="run_param1",
- )
- ],
- },
- )
- )
- assert len(system0.commands) == 1
- run_command1 = system0.commands["run"]
- assert run_command1 == Command(
- ["run_command"],
- [
- Param(
- "--run_param1",
- "Run specific parameter",
- ["1", "2", "3"],
- "2",
- "run_param1",
- )
- ],
- )
-
- system1 = load_system(
- SystemConfig(
- name="test_system",
- user_params={
- "shared": [
- UserParamConfig(
- name="--shared_param1",
- description="Shared parameter",
- values=["1", "2", "3"],
- default_value="1",
- alias="shared_param1",
- )
- ],
- "run": [
- UserParamConfig(
- name="--run_param1",
- description="Run specific parameter",
- values=["1", "2", "3"],
- default_value="2",
- alias="run_param1",
- )
- ],
- },
- )
- )
- assert len(system1.commands) == 1
-
- run_command1 = system1.commands["run"]
- assert run_command1 == Command(
- [],
- [
- Param(
- "--shared_param1",
- "Shared parameter",
- ["1", "2", "3"],
- "1",
- "shared_param1",
- ),
- Param(
- "--run_param1",
- "Run specific parameter",
- ["1", "2", "3"],
- "2",
- "run_param1",
- ),
- ],
- )
-
- system2 = load_system(
- SystemConfig(
- name="test_system",
- commands={"build": ["build_command"]},
- user_params={
- "shared": [
- UserParamConfig(
- name="--shared_param1",
- description="Shared parameter",
- values=["1", "2", "3"],
- default_value="1",
- alias="shared_param1",
- )
- ],
- "run": [
- UserParamConfig(
- name="--run_param1",
- description="Run specific parameter",
- values=["1", "2", "3"],
- default_value="2",
- alias="run_param1",
- )
- ],
- },
- )
- )
- assert len(system2.commands) == 2
- build_command2 = system2.commands["build"]
- assert build_command2 == Command(
- ["build_command"],
- [],
- )
-
- run_command2 = system1.commands["run"]
- assert run_command2 == Command(
- [],
- [
- Param(
- "--shared_param1",
- "Shared parameter",
- ["1", "2", "3"],
- "1",
- "shared_param1",
- ),
- Param(
- "--run_param1",
- "Run specific parameter",
- ["1", "2", "3"],
- "2",
- "run_param1",
- ),
- ],
- )
diff --git a/tests/test_backend_install.py b/tests/test_backend_install.py
index c3efe09..dacb1aa 100644
--- a/tests/test_backend_install.py
+++ b/tests/test_backend_install.py
@@ -3,91 +3,203 @@
"""Tests for common management functionality."""
from __future__ import annotations
+import tarfile
from pathlib import Path
+from unittest.mock import ANY
+from unittest.mock import MagicMock
import pytest
from mlia.backend.install import BackendInfo
-from mlia.backend.install import get_all_application_names
+from mlia.backend.install import BackendInstallation
+from mlia.backend.install import CompoundPathChecker
+from mlia.backend.install import DownloadAndInstall
+from mlia.backend.install import InstallFromPath
+from mlia.backend.install import PackagePathChecker
from mlia.backend.install import StaticPathChecker
-from mlia.backend.registry import get_supported_backends
-from mlia.target.registry import is_supported
+from mlia.backend.repo import BackendRepository
+
+
+@pytest.fixture(name="backend_repo")
+def mock_backend_repo(monkeypatch: pytest.MonkeyPatch) -> MagicMock:
+ """Mock backend repository."""
+ mock = MagicMock(spec=BackendRepository)
+ monkeypatch.setattr("mlia.backend.install.get_backend_repository", lambda: mock)
+
+ return mock
+
+
+def test_wrong_install_type() -> None:
+ """Test that installation should fail for wrong install type."""
+ installation = BackendInstallation(
+ "sample_backend",
+ "Sample backend",
+ "sample_backend",
+ None,
+ None,
+ lambda path: None,
+ None,
+ )
+
+ assert not installation.supports("some_path") # type: ignore
+
+ with pytest.raises(Exception):
+ installation.install("some_path") # type: ignore
@pytest.mark.parametrize(
- "copy_source, system_config",
+ "supported_platforms, expected_result",
[
- (True, "system_config.json"),
- (True, None),
- (False, "system_config.json"),
- (False, None),
+ [None, True],
+ [["UNKNOWN"], False],
],
)
-def test_static_path_checker(
- tmp_path: Path, copy_source: bool, system_config: str
+def test_backend_could_be_installed(
+ supported_platforms: list[str] | None, expected_result: bool
+) -> None:
+ """Test method could_be_installed."""
+ installation = BackendInstallation(
+ "sample_backend",
+ "Sample backend",
+ "sample_backend",
+ None,
+ supported_platforms,
+ lambda path: None,
+ None,
+ )
+
+ assert installation.could_be_installed == expected_result
+
+
+@pytest.mark.parametrize("copy_source", [True, False])
+def test_backend_installation_from_path(
+ tmp_path: Path, backend_repo: MagicMock, copy_source: bool
+) -> None:
+ """Test methods of backend installation."""
+ installation = BackendInstallation(
+ "sample_backend",
+ "Sample backend",
+ "sample_backend",
+ None,
+ None,
+ lambda path: BackendInfo(path, copy_source=copy_source),
+ None,
+ )
+
+ assert installation.supports(InstallFromPath(tmp_path))
+ assert not installation.supports(DownloadAndInstall())
+
+ installation.install(InstallFromPath(tmp_path))
+
+ if copy_source:
+ backend_repo.copy_backend.assert_called_with(
+ "sample_backend", tmp_path, "sample_backend", None
+ )
+ backend_repo.add_backend.assert_not_called()
+ else:
+ backend_repo.copy_backend.assert_not_called()
+ backend_repo.add_backend.assert_called_with("sample_backend", tmp_path, None)
+
+
+def test_backend_installation_download_and_install(
+ tmp_path: Path, backend_repo: MagicMock
) -> None:
- """Test static path checker."""
- checker = StaticPathChecker(tmp_path, ["file1.txt"], copy_source, system_config)
- tmp_path.joinpath("file1.txt").touch()
+ """Test methods of backend installation."""
+ download_artifact_mock = MagicMock()
- result = checker(tmp_path)
- assert result == BackendInfo(tmp_path, copy_source, system_config)
+ tmp_archive = tmp_path.joinpath("sample.tgz")
+ sample_file = tmp_path.joinpath("sample.txt")
+ sample_file.touch()
+ with tarfile.open(tmp_archive, "w:gz") as archive:
+ archive.add(sample_file)
-def test_static_path_checker_invalid_path(tmp_path: Path) -> None:
- """Test static path checker with invalid path."""
- checker = StaticPathChecker(tmp_path, ["file1.txt"])
+ download_artifact_mock.download_to.return_value = tmp_archive
- result = checker(tmp_path)
- assert result is None
+ installation = BackendInstallation(
+ "sample_backend",
+ "Sample backend",
+ "sample_backend",
+ download_artifact_mock,
+ None,
+ lambda path: BackendInfo(path, copy_source=False),
+ lambda eula_agreement, path: path,
+ )
- result = checker(tmp_path / "unknown_directory")
- assert result is None
+ assert installation.supports(DownloadAndInstall())
+ installation.install(DownloadAndInstall())
+ backend_repo.add_backend.assert_called_with("sample_backend", ANY, None)
-def test_supported_backends() -> None:
- """Test function supported backends."""
- assert get_supported_backends() == [
- "ArmNNTFLiteDelegate",
- "Corstone-300",
- "Corstone-310",
- "TOSA-Checker",
- "Vela",
- ]
+def test_backend_installation_unable_to_download() -> None:
+ """Test that installation should fail when downloading fails."""
+ download_artifact_mock = MagicMock()
+ download_artifact_mock.download_to.side_effect = Exception("Download error")
-@pytest.mark.parametrize(
- "backend, expected_result",
- [
- ["unknown_backend", False],
- ["Corstone-300", True],
- ["Corstone-310", True],
- ],
-)
-def test_is_supported(backend: str, expected_result: bool) -> None:
- """Test function is_supported."""
- assert is_supported(backend) == expected_result
+ installation = BackendInstallation(
+ "sample_backend",
+ "Sample backend",
+ "sample_backend",
+ download_artifact_mock,
+ None,
+ lambda path: BackendInfo(path, copy_source=False),
+ lambda eula_agreement, path: path,
+ )
+ with pytest.raises(Exception, match="Unable to download backend artifact"):
+ installation.install(DownloadAndInstall())
-@pytest.mark.parametrize(
- "backend, expected_result",
- [
- [
- "Corstone-300",
- [
- "Generic Inference Runner: Ethos-U55",
- "Generic Inference Runner: Ethos-U65",
- ],
- ],
- [
- "Corstone-310",
- [
- "Generic Inference Runner: Ethos-U55",
- "Generic Inference Runner: Ethos-U65",
- ],
- ],
- ],
-)
-def test_get_all_application_names(backend: str, expected_result: list[str]) -> None:
- """Test function get_all_application_names."""
- assert sorted(get_all_application_names(backend)) == expected_result
+
+def test_static_path_checker(tmp_path: Path) -> None:
+ """Test for StaticPathChecker."""
+ checker1 = StaticPathChecker(tmp_path, [])
+ assert checker1(tmp_path) == BackendInfo(tmp_path, copy_source=False)
+
+ checker2 = StaticPathChecker(tmp_path / "dist", [])
+ assert checker2(tmp_path) is None
+
+ checker3 = StaticPathChecker(tmp_path, ["sample.txt"])
+
+ assert checker3(tmp_path) is None
+
+ sample_file = tmp_path.joinpath("sample.txt")
+ sample_file.touch()
+
+ assert checker3(tmp_path) == BackendInfo(tmp_path, copy_source=False)
+
+
+def test_compound_path_checker(tmp_path: Path) -> None:
+ """Test for CompoundPathChecker."""
+ static_checker = StaticPathChecker(tmp_path, [])
+ compound_checker = CompoundPathChecker(static_checker)
+
+ assert compound_checker(tmp_path) == BackendInfo(tmp_path, copy_source=False)
+
+
+def test_package_path_checker(tmp_path: Path) -> None:
+ """Test PackagePathChecker."""
+ sample_dir = tmp_path.joinpath("sample")
+ sample_dir.mkdir()
+
+ checker1 = PackagePathChecker([], "sample")
+ assert checker1(tmp_path) == BackendInfo(tmp_path / "sample")
+
+ checker2 = PackagePathChecker(["sample.txt"], "sample")
+ assert checker2(tmp_path) is None
+
+
+def test_backend_installation_uninstall(backend_repo: MagicMock) -> None:
+ """Test backend removing process."""
+ installation = BackendInstallation(
+ "sample_backend",
+ "Sample backend",
+ "sample_backend",
+ None,
+ None,
+ lambda path: None,
+ None,
+ )
+
+ installation.uninstall()
+ backend_repo.remove_backend.assert_called_with("sample_backend")
diff --git a/tests/test_backend_manager.py b/tests/test_backend_manager.py
index 19cb357..879353e 100644
--- a/tests/test_backend_manager.py
+++ b/tests/test_backend_manager.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Tests for installation manager."""
from __future__ import annotations
@@ -16,6 +16,8 @@ from mlia.backend.install import Installation
from mlia.backend.install import InstallationType
from mlia.backend.install import InstallFromPath
from mlia.backend.manager import DefaultInstallationManager
+from mlia.core.errors import ConfigurationError
+from mlia.core.errors import InternalError
def get_default_installation_manager_mock(
@@ -255,6 +257,25 @@ def test_installation_manager_install_from(
install_mock.uninstall.assert_not_called()
+def test_installation_manager_unsupported_install_type(
+ monkeypatch: pytest.MonkeyPatch,
+ tmp_path: Path,
+) -> None:
+ """Test that installation could not be installed via unsupported type."""
+ download_install_mock = _could_be_downloaded_and_installed_mock()
+ install_from_mock = _could_be_installed_from_mock()
+ install_mocks = [download_install_mock, install_from_mock]
+
+ manager = get_installation_manager(False, install_mocks, monkeypatch)
+ manager.install_from(tmp_path, "could_be_downloaded_and_installed")
+
+ manager.download_and_install("could_be_installed_from")
+
+ for mock in install_mocks:
+ mock.install.assert_not_called()
+ mock.uninstall.assert_not_called()
+
+
@pytest.mark.parametrize("noninteractive", [True, False])
@pytest.mark.parametrize(
"install_mock, backend_name, expected_call",
@@ -280,3 +301,39 @@ def test_installation_manager_uninstall(
manager.uninstall(backend_name)
assert install_mock.uninstall.mock_calls == expected_call
+
+
+def test_installation_internal_error(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Test that manager should be able to detect wrong state."""
+ install_mock = _ready_for_uninstall_mock()
+ manager = get_installation_manager(False, [install_mock, install_mock], monkeypatch)
+
+ with pytest.raises(
+ InternalError,
+ match="More than one installed backend with name already_installed found",
+ ):
+ manager.uninstall("already_installed")
+
+
+def test_uninstall_unknown_backend(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Test that uninstall should fail for uknown backend."""
+ install_mock = _ready_for_uninstall_mock()
+ manager = get_installation_manager(False, [install_mock, install_mock], monkeypatch)
+
+ with pytest.raises(
+ ConfigurationError, match="Backend 'some_backend' is not installed"
+ ):
+ manager.uninstall("some_backend")
+
+
+def test_show_env_details(monkeypatch: pytest.MonkeyPatch) -> None:
+ """Test method show_env_details."""
+ ready_to_install_mock = _ready_for_installation_mock()
+ could_be_installed_mock = _could_be_installed_from_mock()
+
+ manager = get_installation_manager(
+ False,
+ [ready_to_install_mock, could_be_installed_mock],
+ monkeypatch,
+ )
+ manager.show_env_details()
diff --git a/tests/test_backend_registry.py b/tests/test_backend_registry.py
index 703e699..e3e2da2 100644
--- a/tests/test_backend_registry.py
+++ b/tests/test_backend_registry.py
@@ -9,6 +9,8 @@ import pytest
from mlia.backend.config import BackendType
from mlia.backend.config import System
+from mlia.backend.registry import get_supported_backends
+from mlia.backend.registry import get_supported_systems
from mlia.backend.registry import registry
from mlia.core.common import AdviceCategory
@@ -35,7 +37,7 @@ from mlia.core.common import AdviceCategory
BackendType.CUSTOM,
),
(
- "TOSA-Checker",
+ "tosa-checker",
[AdviceCategory.COMPATIBILITY],
[System.LINUX_AMD64],
BackendType.WHEEL,
@@ -79,3 +81,19 @@ def test_backend_registry(
cfg.supported_systems
), f"Supported systems differs: {advices} != {cfg.supported_advice}"
assert cfg.type == type_
+
+
+def test_get_supported_backends() -> None:
+ """Test function get_supported_backends."""
+ assert get_supported_backends() == [
+ "ArmNNTFLiteDelegate",
+ "Corstone-300",
+ "Corstone-310",
+ "Vela",
+ "tosa-checker",
+ ]
+
+
+def test_get_supported_systems() -> None:
+ """Test function get_supported_systems."""
+ assert get_supported_systems()
diff --git a/tests/test_backend_repo.py b/tests/test_backend_repo.py
new file mode 100644
index 0000000..5071989
--- /dev/null
+++ b/tests/test_backend_repo.py
@@ -0,0 +1,140 @@
+# SPDX-FileCopyrightText: Copyright 2023, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for backend repository."""
+from __future__ import annotations
+
+import json
+from pathlib import Path
+
+import pytest
+
+from mlia.backend.repo import BackendRepository
+from mlia.backend.repo import get_backend_repository
+
+
+def test_get_backend_repository(tmp_path: Path) -> None:
+ """Test function get_backend_repository."""
+ repo_path = tmp_path / "repo"
+ repo = get_backend_repository(repo_path)
+
+ assert isinstance(repo, BackendRepository)
+
+ backends_dir = repo_path / "backends"
+ assert backends_dir.is_dir()
+ assert not list(backends_dir.iterdir())
+
+ config_file = repo_path / "mlia_config.json"
+ assert config_file.is_file()
+ assert json.loads(config_file.read_text()) == {"backends": []}
+
+
+def test_backend_repository_wrong_directory(tmp_path: Path) -> None:
+ """Test that repository instance should throw error for the wrong directory."""
+ with pytest.raises(
+ Exception, match=f"Directory {tmp_path} could not be used as MLIA repository."
+ ):
+ BackendRepository(tmp_path)
+
+
+def test_empty_backend_repository(tmp_path: Path) -> None:
+ """Test empty backend repository."""
+ repo_path = tmp_path / "repo"
+ repo = get_backend_repository(repo_path)
+
+ assert not repo.is_backend_installed("sample_backend")
+
+ with pytest.raises(Exception, match="Backend sample_backend is not installed."):
+ repo.remove_backend("sample_backend")
+
+ with pytest.raises(Exception, match="Backend sample_backend is not installed."):
+ repo.get_backend_settings("sample_backend")
+
+
+def test_adding_backend(tmp_path: Path) -> None:
+ """Test adding backend to the repository."""
+ repo_path = tmp_path / "repo"
+ repo = get_backend_repository(repo_path)
+
+ backend_path = tmp_path.joinpath("backend")
+ backend_path.mkdir()
+
+ settings = {"param": "value"}
+ repo.add_backend("sample_backend", backend_path, settings)
+
+ backends_dir = repo_path / "backends"
+ assert backends_dir.is_dir()
+ assert not list(backends_dir.iterdir())
+
+ assert repo.is_backend_installed("sample_backend")
+ expected_settings = {
+ "param": "value",
+ "backend_path": backend_path.as_posix(),
+ }
+ assert repo.get_backend_settings("sample_backend") == (
+ backend_path,
+ expected_settings,
+ )
+
+ config_file = repo_path / "mlia_config.json"
+ expected_content = {
+ "backends": [
+ {
+ "name": "sample_backend",
+ "settings": {
+ "backend_path": backend_path.as_posix(),
+ "param": "value",
+ },
+ }
+ ]
+ }
+ assert json.loads(config_file.read_text()) == expected_content
+
+ with pytest.raises(Exception, match="Backend sample_backend already installed"):
+ repo.add_backend("sample_backend", backend_path, settings)
+
+ repo.remove_backend("sample_backend")
+ assert not repo.is_backend_installed("sample_backend")
+
+
+def test_copy_backend(tmp_path: Path) -> None:
+ """Test copying backend to the repository."""
+ repo_path = tmp_path / "repo"
+ repo = get_backend_repository(repo_path)
+
+ backend_path = tmp_path.joinpath("backend")
+ backend_path.mkdir()
+
+ backend_path.joinpath("sample.txt").touch()
+
+ settings = {"param": "value"}
+ repo.copy_backend("sample_backend", backend_path, "sample_backend_dir", settings)
+
+ repo_backend_path = repo_path.joinpath("backends", "sample_backend_dir")
+ assert repo_backend_path.is_dir()
+ assert repo_backend_path.joinpath("sample.txt").is_file()
+
+ config_file = repo_path / "mlia_config.json"
+ expected_content = {
+ "backends": [
+ {
+ "name": "sample_backend",
+ "settings": {
+ "backend_dir": "sample_backend_dir",
+ "param": "value",
+ },
+ }
+ ]
+ }
+ assert json.loads(config_file.read_text()) == expected_content
+
+ expected_settings = {
+ "param": "value",
+ "backend_dir": "sample_backend_dir",
+ }
+ assert repo.get_backend_settings("sample_backend") == (
+ repo_backend_path,
+ expected_settings,
+ )
+
+ repo.remove_backend("sample_backend")
+ assert not repo_backend_path.exists()
diff --git a/tests/test_cli_command_validators.py b/tests/test_cli_command_validators.py
index 13514a5..cd048ee 100644
--- a/tests/test_cli_command_validators.py
+++ b/tests/test_cli_command_validators.py
@@ -4,6 +4,7 @@
from __future__ import annotations
import argparse
+from contextlib import ExitStack
from unittest.mock import MagicMock
import pytest
@@ -125,9 +126,9 @@ def test_validate_check_target_profile(
None,
],
["tosa", None, False, None, ["tosa-checker"]],
- ["cortex-a", None, False, None, ["armnn-tflitedelegate"]],
+ ["cortex-a", None, False, None, ["ArmNNTFLiteDelegate"]],
["tosa", ["tosa-checker"], False, None, ["tosa-checker"]],
- ["cortex-a", ["armnn-tflitedelegate"], False, None, ["armnn-tflitedelegate"]],
+ ["cortex-a", ["ArmNNTFLiteDelegate"], False, None, ["ArmNNTFLiteDelegate"]],
[
"ethos-u55-256",
["Vela", "Corstone-300"],
@@ -158,10 +159,11 @@ def test_validate_backend(
MagicMock(return_value=["Vela", "Corstone-300"]),
)
+ exit_stack = ExitStack()
if throws_exception:
- with pytest.raises(argparse.ArgumentError) as err:
- validate_backend(input_target_profile, input_backends)
- assert str(err.value.message) == exception_message
- return
+ exit_stack.enter_context(
+ pytest.raises(argparse.ArgumentError, match=exception_message)
+ )
- assert validate_backend(input_target_profile, input_backends) == output_backends
+ with exit_stack:
+ assert validate_backend(input_target_profile, input_backends) == output_backends
diff --git a/tests/test_cli_config.py b/tests/test_cli_config.py
index 1487f11..8494d73 100644
--- a/tests/test_cli_config.py
+++ b/tests/test_cli_config.py
@@ -8,7 +8,6 @@ from unittest.mock import MagicMock
import pytest
from mlia.cli.config import get_default_backends
-from mlia.cli.config import is_corstone_backend
@pytest.mark.parametrize(
@@ -28,7 +27,7 @@ from mlia.cli.config import is_corstone_backend
["Vela", "Corstone-300", "New backend"],
],
[["ArmNNTFLiteDelegate"], ["ArmNNTFLiteDelegate"]],
- [["TOSA-Checker"], ["TOSA-Checker"]],
+ [["tosa-checker"], ["tosa-checker"]],
[
["ArmNNTFLiteDelegate", "Corstone-300"],
["ArmNNTFLiteDelegate", "Corstone-300"],
@@ -47,10 +46,3 @@ def test_get_default_backends(
)
assert get_default_backends() == expected_default_backends
-
-
-def test_is_corstone_backend() -> None:
- """Test function is_corstone_backend."""
- assert is_corstone_backend("Corstone-300") is True
- assert is_corstone_backend("Corstone-310") is True
- assert is_corstone_backend("New backend") is False
diff --git a/tests/test_resources/application_config.json b/tests/test_resources/application_config.json
deleted file mode 100644
index 8c5d2b5..0000000
--- a/tests/test_resources/application_config.json
+++ /dev/null
@@ -1,94 +0,0 @@
-[
- {
- "name": "application_1",
- "description": "application number one",
- "supported_systems": [
- "system_1",
- "system_2"
- ],
- "commands": {
- "clean": [
- "clean_cmd_11"
- ],
- "build": [
- "build_cmd_11"
- ],
- "run": [
- "run_cmd_11"
- ],
- "post_run": [
- "post_run_cmd_11"
- ]
- },
- "user_params": {
- "run": [
- {
- "name": "run_param_11",
- "values": [],
- "description": "run param number one"
- }
- ],
- "build": [
- {
- "name": "build_param_11",
- "values": [],
- "description": "build param number one"
- },
- {
- "name": "build_param_12",
- "values": [],
- "description": "build param number two"
- },
- {
- "name": "build_param_13",
- "values": [
- "value_1"
- ],
- "description": "build param number three with some value"
- }
- ]
- }
- },
- {
- "name": "application_2",
- "description": "application number two",
- "supported_systems": [
- "system_2"
- ],
- "commands": {
- "clean": [
- "clean_cmd_21"
- ],
- "build": [
- "build_cmd_21",
- "build_cmd_22"
- ],
- "run": [
- "run_cmd_21"
- ],
- "post_run": [
- "post_run_cmd_21"
- ]
- },
- "user_params": {
- "build": [
- {
- "name": "build_param_21",
- "values": [],
- "description": "build param number one"
- },
- {
- "name": "build_param_22",
- "values": [],
- "description": "build param number two"
- },
- {
- "name": "build_param_23",
- "values": [],
- "description": "build param number three"
- }
- ],
- "run": []
- }
- }
-]
diff --git a/tests/test_resources/application_config.json.license b/tests/test_resources/application_config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/application_config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/backends/applications/application1/backend-config.json b/tests/test_resources/backends/applications/application1/backend-config.json
deleted file mode 100644
index 96d5420..0000000
--- a/tests/test_resources/backends/applications/application1/backend-config.json
+++ /dev/null
@@ -1,29 +0,0 @@
-[
- {
- "name": "application_1",
- "description": "This is application 1",
- "supported_systems": [
- {
- "name": "System 1"
- }
- ],
- "commands": {
- "clean": [
- "echo 'clean'"
- ],
- "build": [
- "echo 'build'"
- ],
- "run": [
- "echo 'run'"
- ],
- "post_run": [
- "echo 'post_run'"
- ]
- },
- "user_params": {
- "build": [],
- "run": []
- }
- }
-]
diff --git a/tests/test_resources/backends/applications/application1/backend-config.json.license b/tests/test_resources/backends/applications/application1/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/backends/applications/application1/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/backends/applications/application2/backend-config.json b/tests/test_resources/backends/applications/application2/backend-config.json
deleted file mode 100644
index 3a3969a..0000000
--- a/tests/test_resources/backends/applications/application2/backend-config.json
+++ /dev/null
@@ -1,29 +0,0 @@
-[
- {
- "name": "application_2",
- "description": "This is application 2",
- "supported_systems": [
- {
- "name": "System 2"
- }
- ],
- "commands": {
- "clean": [
- "echo 'clean'"
- ],
- "build": [
- "echo 'build'"
- ],
- "run": [
- "echo 'run'"
- ],
- "post_run": [
- "echo 'post_run'"
- ]
- },
- "user_params": {
- "build": [],
- "run": []
- }
- }
-]
diff --git a/tests/test_resources/backends/applications/application2/backend-config.json.license b/tests/test_resources/backends/applications/application2/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/backends/applications/application2/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/backends/applications/application3/readme.txt b/tests/test_resources/backends/applications/application3/readme.txt
deleted file mode 100644
index 8c72c05..0000000
--- a/tests/test_resources/backends/applications/application3/readme.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-SPDX-License-Identifier: Apache-2.0
-
-This application does not have json configuration file
diff --git a/tests/test_resources/backends/applications/application4/backend-config.json b/tests/test_resources/backends/applications/application4/backend-config.json
deleted file mode 100644
index 11017e4..0000000
--- a/tests/test_resources/backends/applications/application4/backend-config.json
+++ /dev/null
@@ -1,33 +0,0 @@
-[
- {
- "name": "application_4",
- "description": "This is application 4",
- "variables": {
- "build_dir": "build"
- },
- "supported_systems": [
- {
- "name": "System 4"
- }
- ],
- "commands": {
- "run": [
- "echo {application.name}"
- ]
- },
- "user_params": {
- "run": [
- {
- "name": "--app",
- "description": "Sample command param",
- "values": [
- "application1",
- "application2",
- "application3"
- ],
- "default_value": "application1"
- }
- ]
- }
- }
-]
diff --git a/tests/test_resources/backends/applications/application4/backend-config.json.license b/tests/test_resources/backends/applications/application4/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/backends/applications/application4/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/backends/applications/application5/backend-config.json b/tests/test_resources/backends/applications/application5/backend-config.json
deleted file mode 100644
index 219494c..0000000
--- a/tests/test_resources/backends/applications/application5/backend-config.json
+++ /dev/null
@@ -1,134 +0,0 @@
-[
- {
- "name": "application_5",
- "description": "This is application 5",
- "supported_systems": [
- {
- "name": "System 1"
- },
- {
- "name": "System 2"
- }
- ],
- "variables": {
- "var1": "value1",
- "var2": "value2"
- },
- "commands": {
- "build": [
- "default build command"
- ],
- "run": [
- "default run command"
- ]
- },
- "user_params": {
- "build": [],
- "run": []
- }
- },
- {
- "name": "application_5A",
- "description": "This is application 5A",
- "supported_systems": [
- {
- "name": "System 1",
- "variables": {
- "var1": "new value1"
- }
- },
- {
- "name": "System 2",
- "variables": {
- "var2": "new value2"
- },
- "commands": {
- "run": [
- "run command on system 2"
- ]
- }
- }
- ],
- "variables": {
- "var1": "value1",
- "var2": "value2"
- },
- "commands": {
- "build": [
- "default build command"
- ],
- "run": [
- "default run command"
- ]
- },
- "user_params": {
- "build": [],
- "run": []
- }
- },
- {
- "name": "application_5B",
- "description": "This is application 5B",
- "supported_systems": [
- {
- "name": "System 1",
- "variables": {
- "var1": "value for var1 System1",
- "var2": "value for var2 System1"
- }
- },
- {
- "name": "System 2",
- "variables": {
- "var1": "value for var1 System2",
- "var2": "value for var2 System2"
- },
- "commands": {
- "run": [
- "run command on system 2"
- ]
- },
- "user_params": {
- "run": []
- }
- }
- ],
- "commands": {
- "build": [
- "default build command with {variables:var1}"
- ],
- "run": [
- "default run command with {variables:var2}"
- ]
- },
- "user_params": {
- "build": [
- {
- "name": "--param",
- "description": "Sample command param",
- "values": [
- "value1",
- "value2",
- "value3"
- ],
- "default_value": "value1",
- "alias": "param1"
- }
- ],
- "run": [],
- "non_used_command": [
- {
- "name": "--not-used",
- "description": "Not used param anywhere",
- "values": [
- "value1",
- "value2",
- "value3"
- ],
- "default_value": "value1",
- "alias": "param1"
- }
- ]
- }
- }
-]
diff --git a/tests/test_resources/backends/applications/application5/backend-config.json.license b/tests/test_resources/backends/applications/application5/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/backends/applications/application5/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/backends/applications/application6/backend-config.json b/tests/test_resources/backends/applications/application6/backend-config.json
deleted file mode 100644
index 81afebd..0000000
--- a/tests/test_resources/backends/applications/application6/backend-config.json
+++ /dev/null
@@ -1,41 +0,0 @@
-[
- {
- "name": "application_6",
- "description": "This is application 6",
- "supported_systems": [
- {
- "name": "System 6"
- }
- ],
- "commands": {
- "clean": [
- "echo 'clean'"
- ],
- "build": [
- "echo 'build'"
- ],
- "run": [
- "echo 'run {user_params:param1}'"
- ],
- "post_run": [
- "echo 'post_run'"
- ]
- },
- "user_params": {
- "build": [],
- "run": [
- {
- "name": "--param1",
- "description": "Test parameter",
- "values": [
- "value1",
- "value2",
- "value3"
- ],
- "default_value": "value3",
- "alias": "param1"
- }
- ]
- }
- }
-]
diff --git a/tests/test_resources/backends/applications/application6/backend-config.json.license b/tests/test_resources/backends/applications/application6/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/backends/applications/application6/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/backends/applications/readme.txt b/tests/test_resources/backends/applications/readme.txt
deleted file mode 100644
index d3e6fe2..0000000
--- a/tests/test_resources/backends/applications/readme.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-SPDX-License-Identifier: Apache-2.0
-
-File for test purposes
diff --git a/tests/test_resources/backends/systems/system1/backend-config.json b/tests/test_resources/backends/systems/system1/backend-config.json
deleted file mode 100644
index 4454695..0000000
--- a/tests/test_resources/backends/systems/system1/backend-config.json
+++ /dev/null
@@ -1,24 +0,0 @@
-[
- {
- "name": "System 1",
- "description": "This is system 1",
- "commands": {
- "clean": [
- "echo 'clean'"
- ],
- "build": [
- "echo 'build'"
- ],
- "run": [
- "echo 'run'"
- ],
- "post_run": [
- "echo 'post_run'"
- ]
- },
- "user_params": {
- "build": [],
- "run": []
- }
- }
-]
diff --git a/tests/test_resources/backends/systems/system1/backend-config.json.license b/tests/test_resources/backends/systems/system1/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/backends/systems/system1/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/backends/systems/system1/system_artifact/empty.txt b/tests/test_resources/backends/systems/system1/system_artifact/empty.txt
deleted file mode 100644
index 487e9d8..0000000
--- a/tests/test_resources/backends/systems/system1/system_artifact/empty.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/backends/systems/system2/backend-config.json b/tests/test_resources/backends/systems/system2/backend-config.json
deleted file mode 100644
index 3359d3d..0000000
--- a/tests/test_resources/backends/systems/system2/backend-config.json
+++ /dev/null
@@ -1,24 +0,0 @@
-[
- {
- "name": "System 2",
- "description": "This is system 2",
- "commands": {
- "clean": [
- "echo 'clean'"
- ],
- "build": [
- "echo 'build'"
- ],
- "run": [
- "echo 'run'"
- ],
- "post_run": [
- "echo 'post_run'"
- ]
- },
- "user_params": {
- "build": [],
- "run": []
- }
- }
-]
diff --git a/tests/test_resources/backends/systems/system2/backend-config.json.license b/tests/test_resources/backends/systems/system2/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/backends/systems/system2/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/backends/systems/system3/readme.txt b/tests/test_resources/backends/systems/system3/readme.txt
deleted file mode 100644
index aba5a9c..0000000
--- a/tests/test_resources/backends/systems/system3/readme.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-SPDX-License-Identifier: Apache-2.0
-
-This system does not have the json configuration file
diff --git a/tests/test_resources/backends/systems/system4/backend-config.json b/tests/test_resources/backends/systems/system4/backend-config.json
deleted file mode 100644
index daa4025..0000000
--- a/tests/test_resources/backends/systems/system4/backend-config.json
+++ /dev/null
@@ -1,15 +0,0 @@
-[
- {
- "name": "System 4",
- "description": "This is system 4",
- "commands": {
- "run": [
- "echo {system.name}",
- "{application.commands.run:0}"
- ]
- },
- "user_params": {
- "run": []
- }
- }
-]
diff --git a/tests/test_resources/backends/systems/system4/backend-config.json.license b/tests/test_resources/backends/systems/system4/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/backends/systems/system4/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/backends/systems/system6/backend-config.json b/tests/test_resources/backends/systems/system6/backend-config.json
deleted file mode 100644
index 5180799..0000000
--- a/tests/test_resources/backends/systems/system6/backend-config.json
+++ /dev/null
@@ -1,30 +0,0 @@
-[
- {
- "name": "System 6",
- "description": "This is system 6",
- "variables": {
- "var1": "{user_params:sys-param1}"
- },
- "commands": {
- "run": [
- "echo {application.name}",
- "{application.commands.run:0}"
- ]
- },
- "user_params": {
- "run": [
- {
- "name": "--sys-param1",
- "description": "Test parameter",
- "values": [
- "value1",
- "value2",
- "value3"
- ],
- "default_value": "value1",
- "alias": "sys-param1"
- }
- ]
- }
- }
-]
diff --git a/tests/test_resources/backends/systems/system6/backend-config.json.license b/tests/test_resources/backends/systems/system6/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/backends/systems/system6/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/hello_world.json b/tests/test_resources/hello_world.json
deleted file mode 100644
index 28d7bd9..0000000
--- a/tests/test_resources/hello_world.json
+++ /dev/null
@@ -1,53 +0,0 @@
-[
- {
- "name": "Hello world",
- "description": "Sample application that displays 'Hello world!'",
- "supported_systems": [
- "Sample System"
- ],
- "deploy_data": [
- [
- "src",
- "/tmp/"
- ],
- [
- "README",
- "/tmp/README.md"
- ]
- ],
- "commands": {
- "clean": [],
- "build": [],
- "run": [
- "echo 'Hello world!'",
- "ls -l /tmp"
- ],
- "post_run": []
- },
- "user_params": {
- "run": [
- {
- "name": "--choice-param",
- "values": [
- "value_1",
- "value_2"
- ],
- "default_value": "value_1",
- "description": "Choice param"
- },
- {
- "name": "--open-param",
- "values": [],
- "default_value": "value_4",
- "description": "Open param"
- },
- {
- "name": "--enable-flag",
- "default_value": "value_4",
- "description": "Flag param"
- }
- ],
- "build": []
- }
- }
-]
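
Note: hello_world.json above described a sample application with 'commands', 'deploy_data' and 'user_params' sections. As a purely illustrative sketch (the 'default_run_args' helper is a hypothetical name, not part of MLIA), the defaults in such a 'user_params' block could be flattened into a run command line as follows:

from __future__ import annotations


def default_run_args(user_params: list[dict]) -> list[str]:
    """Build a flat argument list from parameter definitions and their defaults."""
    args: list[str] = []
    for param in user_params:
        args.append(param["name"])
        if "default_value" in param:
            args.append(str(param["default_value"]))
    return args


if __name__ == "__main__":
    run_params = [
        {"name": "--choice-param", "values": ["value_1", "value_2"], "default_value": "value_1"},
        {"name": "--open-param", "values": [], "default_value": "value_4"},
    ]
    # ['--choice-param', 'value_1', '--open-param', 'value_4']
    print(default_run_args(run_params))
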
diff --git a/tests/test_resources/hello_world.json.license b/tests/test_resources/hello_world.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/hello_world.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/scripts/test_backend_run b/tests/test_resources/scripts/test_backend_run
deleted file mode 100755
index 548f577..0000000
--- a/tests/test_resources/scripts/test_backend_run
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-
-echo "Hello from script"
->&2 echo "Oops!"
-sleep 100
diff --git a/tests/test_resources/scripts/test_backend_run_script.sh b/tests/test_resources/scripts/test_backend_run_script.sh
deleted file mode 100644
index 548f577..0000000
--- a/tests/test_resources/scripts/test_backend_run_script.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-# SPDX-License-Identifier: Apache-2.0
-
-echo "Hello from script"
->&2 echo "Oops!"
-sleep 100
diff --git a/tests/test_resources/various/applications/application_with_empty_config/backend-config.json b/tests/test_resources/various/applications/application_with_empty_config/backend-config.json
deleted file mode 100644
index fe51488..0000000
--- a/tests/test_resources/various/applications/application_with_empty_config/backend-config.json
+++ /dev/null
@@ -1 +0,0 @@
-[]
diff --git a/tests/test_resources/various/applications/application_with_empty_config/backend-config.json.license b/tests/test_resources/various/applications/application_with_empty_config/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/various/applications/application_with_empty_config/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/various/applications/application_with_valid_config/backend-config.json b/tests/test_resources/various/applications/application_with_valid_config/backend-config.json
deleted file mode 100644
index a457d9b..0000000
--- a/tests/test_resources/various/applications/application_with_valid_config/backend-config.json
+++ /dev/null
@@ -1,37 +0,0 @@
-[
- {
- "name": "test_application",
- "description": "This is test_application",
- "variables": {
- "build_dir": "build"
- },
- "supported_systems": [
- {
- "name": "System 4"
- }
- ],
- "commands": {
- "build": [
- "cp ../hello_app.txt ."
- ],
- "run": [
- "{application.variables:build_dir}/hello_app.txt"
- ]
- },
- "user_params": {
- "build": [
- {
- "name": "--app",
- "description": "Sample command param",
- "values": [
- "application1",
- "application2",
- "application3"
- ],
- "default_value": "application1"
- }
- ],
- "run": []
- }
- }
-]
diff --git a/tests/test_resources/various/applications/application_with_valid_config/backend-config.json.license b/tests/test_resources/various/applications/application_with_valid_config/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/various/applications/application_with_valid_config/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/various/applications/application_with_wrong_config1/backend-config.json b/tests/test_resources/various/applications/application_with_wrong_config1/backend-config.json
deleted file mode 100644
index 724b31b..0000000
--- a/tests/test_resources/various/applications/application_with_wrong_config1/backend-config.json
+++ /dev/null
@@ -1,2 +0,0 @@
-This is not valid json file
-{
diff --git a/tests/test_resources/various/applications/application_with_wrong_config1/backend-config.json.license b/tests/test_resources/various/applications/application_with_wrong_config1/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/various/applications/application_with_wrong_config1/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/various/applications/application_with_wrong_config2/backend-config.json b/tests/test_resources/various/applications/application_with_wrong_config2/backend-config.json
deleted file mode 100644
index b64e6f8..0000000
--- a/tests/test_resources/various/applications/application_with_wrong_config2/backend-config.json
+++ /dev/null
@@ -1,32 +0,0 @@
-[
- {
- "name": "test_application",
- "description": "This is test_application",
- "variables": {
- "build_dir": "build"
- },
- "commands": {
- "build": [
- "cp ../hello_app.txt ."
- ],
- "run": [
- "{application.variables:build_dir}/hello_app.txt"
- ]
- },
- "user_params": {
- "build": [
- {
- "name": "--app",
- "description": "Sample command param",
- "values": [
- "application1",
- "application2",
- "application3"
- ],
- "default_value": "application1"
- }
- ],
- "run": []
- }
- }
-]
diff --git a/tests/test_resources/various/applications/application_with_wrong_config2/backend-config.json.license b/tests/test_resources/various/applications/application_with_wrong_config2/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/various/applications/application_with_wrong_config2/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/various/applications/application_with_wrong_config3/backend-config.json b/tests/test_resources/various/applications/application_with_wrong_config3/backend-config.json
deleted file mode 100644
index 4a70cdd..0000000
--- a/tests/test_resources/various/applications/application_with_wrong_config3/backend-config.json
+++ /dev/null
@@ -1,37 +0,0 @@
-[
- {
- "name": "test_application",
- "description": "This is test_application",
- "variables": {
- "build_dir": "build"
- },
- "supported_systems": [
- {
- "anme": "System 4"
- }
- ],
- "commands": {
- "build": [
- "cp ../hello_app.txt ."
- ],
- "run": [
- "{application.variables:build_dir}/hello_app.txt"
- ]
- },
- "user_params": {
- "build": [
- {
- "name": "--app",
- "description": "Sample command param",
- "values": [
- "application1",
- "application2",
- "application3"
- ],
- "default_value": "application1"
- }
- ],
- "run": []
- }
- }
-]
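
Note: the three 'wrong config' fixtures removed in this change cover distinct failure modes: application_with_wrong_config1 is not valid JSON, application_with_wrong_config2 has no 'supported_systems' section, and application_with_wrong_config3 misspells the 'name' key ('anme'). The sketch below illustrates checks of that kind, assuming a simple list-of-objects schema; it is not the removed validation code and 'check_application_config' is a hypothetical helper.

from __future__ import annotations

import json
from pathlib import Path


def check_application_config(path: Path) -> list[str]:
    """Return a list of problems found in an application backend-config.json."""
    try:
        entries = json.loads(path.read_text())
    except json.JSONDecodeError as err:
        return [f"not valid JSON: {err}"]  # e.g. application_with_wrong_config1

    problems: list[str] = []
    for entry in entries:
        systems = entry.get("supported_systems")
        if not systems:
            problems.append("missing 'supported_systems'")  # wrong_config2
            continue
        for system in systems:
            if "name" not in system:
                problems.append("system entry without a 'name'")  # wrong_config3
    return problems
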
diff --git a/tests/test_resources/various/applications/application_with_wrong_config3/backend-config.json.license b/tests/test_resources/various/applications/application_with_wrong_config3/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/various/applications/application_with_wrong_config3/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/various/systems/system_with_empty_config/backend-config.json b/tests/test_resources/various/systems/system_with_empty_config/backend-config.json
deleted file mode 100644
index fe51488..0000000
--- a/tests/test_resources/various/systems/system_with_empty_config/backend-config.json
+++ /dev/null
@@ -1 +0,0 @@
-[]
diff --git a/tests/test_resources/various/systems/system_with_empty_config/backend-config.json.license b/tests/test_resources/various/systems/system_with_empty_config/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/various/systems/system_with_empty_config/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_resources/various/systems/system_with_valid_config/backend-config.json b/tests/test_resources/various/systems/system_with_valid_config/backend-config.json
deleted file mode 100644
index 83c3025..0000000
--- a/tests/test_resources/various/systems/system_with_valid_config/backend-config.json
+++ /dev/null
@@ -1,12 +0,0 @@
-[
- {
- "name": "Test system",
- "description": "This is a test system",
- "commands": {
- "run": []
- },
- "user_params": {
- "run": []
- }
- }
-]
diff --git a/tests/test_resources/various/systems/system_with_valid_config/backend-config.json.license b/tests/test_resources/various/systems/system_with_valid_config/backend-config.json.license
deleted file mode 100644
index 9b83bfc..0000000
--- a/tests/test_resources/various/systems/system_with_valid_config/backend-config.json.license
+++ /dev/null
@@ -1,3 +0,0 @@
-SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
-
-SPDX-License-Identifier: Apache-2.0
diff --git a/tests/test_target_registry.py b/tests/test_target_registry.py
index e6028a9..5012148 100644
--- a/tests/test_target_registry.py
+++ b/tests/test_target_registry.py
@@ -6,6 +6,7 @@ from __future__ import annotations
import pytest
from mlia.core.common import AdviceCategory
+from mlia.target.registry import all_supported_backends
from mlia.target.registry import registry
from mlia.target.registry import supported_advice
from mlia.target.registry import supported_backends
@@ -13,7 +14,7 @@ from mlia.target.registry import supported_targets
@pytest.mark.parametrize(
- "expected_target", ("Cortex-A", "Ethos-U55", "Ethos-U65", "TOSA")
+ "expected_target", ("cortex-a", "ethos-u55", "ethos-u65", "tosa")
)
def test_target_registry(expected_target: str) -> None:
"""Test the target registry."""
@@ -26,9 +27,9 @@ def test_target_registry(expected_target: str) -> None:
@pytest.mark.parametrize(
("target_name", "expected_advices"),
(
- ("Cortex-A", [AdviceCategory.COMPATIBILITY]),
+ ("cortex-a", [AdviceCategory.COMPATIBILITY]),
(
- "Ethos-U55",
+ "ethos-u55",
[
AdviceCategory.COMPATIBILITY,
AdviceCategory.OPTIMIZATION,
@@ -36,14 +37,14 @@ def test_target_registry(expected_target: str) -> None:
],
),
(
- "Ethos-U65",
+ "ethos-u65",
[
AdviceCategory.COMPATIBILITY,
AdviceCategory.OPTIMIZATION,
AdviceCategory.PERFORMANCE,
],
),
- ("TOSA", [AdviceCategory.COMPATIBILITY]),
+ ("tosa", [AdviceCategory.COMPATIBILITY]),
),
)
def test_supported_advice(
@@ -58,10 +59,10 @@ def test_supported_advice(
@pytest.mark.parametrize(
("target_name", "expected_backends"),
(
- ("Cortex-A", ["ArmNNTFLiteDelegate"]),
- ("Ethos-U55", ["Corstone-300", "Corstone-310", "Vela"]),
- ("Ethos-U65", ["Corstone-300", "Corstone-310", "Vela"]),
- ("TOSA", ["TOSA-Checker"]),
+ ("cortex-a", ["ArmNNTFLiteDelegate"]),
+ ("ethos-u55", ["Corstone-300", "Corstone-310", "Vela"]),
+ ("ethos-u65", ["Corstone-300", "Corstone-310", "Vela"]),
+ ("tosa", ["tosa-checker"]),
),
)
def test_supported_backends(target_name: str, expected_backends: list[str]) -> None:
@@ -72,11 +73,22 @@ def test_supported_backends(target_name: str, expected_backends: list[str]) -> N
@pytest.mark.parametrize(
("advice", "expected_targets"),
(
- (AdviceCategory.COMPATIBILITY, ["Cortex-A", "Ethos-U55", "Ethos-U65", "TOSA"]),
- (AdviceCategory.OPTIMIZATION, ["Ethos-U55", "Ethos-U65"]),
- (AdviceCategory.PERFORMANCE, ["Ethos-U55", "Ethos-U65"]),
+ (AdviceCategory.COMPATIBILITY, ["cortex-a", "ethos-u55", "ethos-u65", "tosa"]),
+ (AdviceCategory.OPTIMIZATION, ["ethos-u55", "ethos-u65"]),
+ (AdviceCategory.PERFORMANCE, ["ethos-u55", "ethos-u65"]),
),
)
def test_supported_targets(advice: AdviceCategory, expected_targets: list[str]) -> None:
"""Test function supported_targets()."""
assert sorted(expected_targets) == sorted(supported_targets(advice))
+
+
+def test_all_supported_backends() -> None:
+ """Test function all_supported_backends."""
+ assert all_supported_backends() == {
+ "Vela",
+ "tosa-checker",
+ "ArmNNTFLiteDelegate",
+ "Corstone-310",
+ "Corstone-300",
+ }
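
Note: the new test pins down all_supported_backends() as the union of the per-target backend lists. Below is a minimal sketch of such an aggregation, assuming the registry can be viewed as a mapping from target name to backend names; the helper name and the mapping are hypothetical and the actual mlia.target.registry implementation may differ.

from __future__ import annotations


def all_supported_backends_from(target_backends: dict[str, list[str]]) -> set[str]:
    """Union of backend names across all registered targets."""
    return {backend for backends in target_backends.values() for backend in backends}


if __name__ == "__main__":
    registry_view = {
        "cortex-a": ["ArmNNTFLiteDelegate"],
        "ethos-u55": ["Corstone-300", "Corstone-310", "Vela"],
        "ethos-u65": ["Corstone-300", "Corstone-310", "Vela"],
        "tosa": ["tosa-checker"],
    }
    # {'Vela', 'tosa-checker', 'ArmNNTFLiteDelegate', 'Corstone-310', 'Corstone-300'}
    print(all_supported_backends_from(registry_view))
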
diff --git a/tests/test_utils_proc.py b/tests/test_utils_proc.py
new file mode 100644
index 0000000..bea431f
--- /dev/null
+++ b/tests/test_utils_proc.py
@@ -0,0 +1,17 @@
+# SPDX-FileCopyrightText: Copyright 2023, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for process management functions."""
+from unittest.mock import MagicMock
+
+from mlia.utils.proc import Command
+from mlia.utils.proc import process_command_output
+
+
+def test_process_command_output() -> None:
+ """Test function process_command_output."""
+ command = Command(["echo", "-n", "sample message"])
+
+ output_consumer = MagicMock()
+ process_command_output(command, [output_consumer])
+
+ output_consumer.assert_called_once_with("sample message")
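
Note: the new test only fixes the observable contract of mlia.utils.proc: Command wraps an argument list and process_command_output feeds each line of the command's output to every consumer (here, a single echoed string). The sketch below is one way to satisfy that contract, assuming subprocess-based execution; the real module may be structured differently.

# Hypothetical sketch matching the behaviour pinned down by test_process_command_output.
from __future__ import annotations

import subprocess
from dataclasses import dataclass
from typing import Callable, Iterable


@dataclass
class Command:
    """A command line to execute, e.g. Command(['echo', '-n', 'sample message'])."""

    cmd: list[str]


def process_command_output(
    command: Command, consumers: Iterable[Callable[[str], None]]
) -> None:
    """Run the command and pass each line of its output to every consumer."""
    result = subprocess.run(command.cmd, capture_output=True, text=True, check=True)
    for line in result.stdout.splitlines():
        for consumer in consumers:
            consumer(line)
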