From 37959522a805a5e23c930ed79aac84920c3cb208 Mon Sep 17 00:00:00 2001 From: Dmitrii Agibov Date: Fri, 18 Nov 2022 16:34:03 +0000 Subject: Move backends functionality into separate modules - Move backend management/executor code into module backend_core - Create separate module for each backend in "backend" module - Move each backend into corresponding module - Split Vela wrapper into several submodules Change-Id: If01b6774aab6501951212541cc5d7f5aa7c97e95 --- src/mlia/backend/__init__.py | 2 +- src/mlia/backend/application.py | 170 ----- src/mlia/backend/common.py | 517 -------------- src/mlia/backend/config.py | 68 -- src/mlia/backend/corstone/__init__.py | 3 + src/mlia/backend/corstone/install.py | 155 +++++ src/mlia/backend/corstone/performance.py | 233 +++++++ src/mlia/backend/execution.py | 342 --------- src/mlia/backend/executor/__init__.py | 3 + src/mlia/backend/executor/application.py | 170 +++++ src/mlia/backend/executor/common.py | 517 ++++++++++++++ src/mlia/backend/executor/config.py | 68 ++ src/mlia/backend/executor/execution.py | 342 +++++++++ src/mlia/backend/executor/fs.py | 88 +++ src/mlia/backend/executor/output_consumer.py | 67 ++ src/mlia/backend/executor/proc.py | 191 +++++ src/mlia/backend/executor/runner.py | 98 +++ src/mlia/backend/executor/source.py | 207 ++++++ src/mlia/backend/executor/system.py | 178 +++++ src/mlia/backend/fs.py | 88 --- src/mlia/backend/install.py | 450 ++++++++++++ src/mlia/backend/manager.py | 505 ++++++-------- src/mlia/backend/output_consumer.py | 67 -- src/mlia/backend/proc.py | 191 ----- src/mlia/backend/source.py | 207 ------ src/mlia/backend/system.py | 178 ----- src/mlia/backend/tosa_checker/__init__.py | 3 + src/mlia/backend/tosa_checker/install.py | 19 + src/mlia/backend/vela/__init__.py | 3 + src/mlia/backend/vela/compat.py | 158 +++++ src/mlia/backend/vela/compiler.py | 274 ++++++++ src/mlia/backend/vela/performance.py | 97 +++ src/mlia/cli/config.py | 15 +- src/mlia/devices/ethosu/config.py | 4 +- 
src/mlia/devices/ethosu/data_analysis.py | 2 +- src/mlia/devices/ethosu/data_collection.py | 4 +- src/mlia/devices/ethosu/handlers.py | 2 +- src/mlia/devices/ethosu/operators.py | 4 +- src/mlia/devices/ethosu/performance.py | 25 +- src/mlia/devices/ethosu/reporters.py | 4 +- src/mlia/tools/__init__.py | 3 - src/mlia/tools/metadata/__init__.py | 3 - src/mlia/tools/metadata/common.py | 322 --------- src/mlia/tools/metadata/corstone.py | 417 ----------- src/mlia/tools/metadata/py_package.py | 84 --- src/mlia/tools/vela_wrapper.py | 497 ------------- tests/conftest.py | 6 +- tests/test_api.py | 4 +- tests/test_backend_application.py | 418 ----------- tests/test_backend_common.py | 480 ------------- tests/test_backend_corstone_install.py | 490 +++++++++++++ tests/test_backend_corstone_performance.py | 519 ++++++++++++++ tests/test_backend_execution.py | 212 ------ tests/test_backend_executor_application.py | 422 +++++++++++ tests/test_backend_executor_common.py | 482 +++++++++++++ tests/test_backend_executor_execution.py | 212 ++++++ tests/test_backend_executor_fs.py | 138 ++++ tests/test_backend_executor_output_consumer.py | 100 +++ tests/test_backend_executor_proc.py | 190 +++++ tests/test_backend_executor_runner.py | 254 +++++++ tests/test_backend_executor_source.py | 205 ++++++ tests/test_backend_executor_system.py | 358 ++++++++++ tests/test_backend_fs.py | 134 ---- tests/test_backend_install.py | 124 ++++ tests/test_backend_manager.py | 930 ++++++------------------- tests/test_backend_output_consumer.py | 100 --- tests/test_backend_proc.py | 189 ----- tests/test_backend_source.py | 202 ------ tests/test_backend_system.py | 356 ---------- tests/test_backend_tosa_checker_install.py | 50 ++ tests/test_backend_vela_compat.py | 74 ++ tests/test_backend_vela_compiler.py | 163 +++++ tests/test_backend_vela_performance.py | 64 ++ tests/test_cli_commands.py | 2 +- tests/test_devices_ethosu_config.py | 2 +- tests/test_devices_ethosu_data_analysis.py | 6 +- 
tests/test_devices_ethosu_data_collection.py | 2 +- tests/test_devices_ethosu_performance.py | 2 +- tests/test_devices_ethosu_reporters.py | 6 +- tests/test_tools_metadata_common.py | 282 -------- tests/test_tools_metadata_corstone.py | 488 ------------- tests/test_tools_metadata_py_package.py | 62 -- tests/test_tools_vela_wrapper.py | 285 -------- 83 files changed, 7648 insertions(+), 7410 deletions(-) delete mode 100644 src/mlia/backend/application.py delete mode 100644 src/mlia/backend/common.py delete mode 100644 src/mlia/backend/config.py create mode 100644 src/mlia/backend/corstone/__init__.py create mode 100644 src/mlia/backend/corstone/install.py create mode 100644 src/mlia/backend/corstone/performance.py delete mode 100644 src/mlia/backend/execution.py create mode 100644 src/mlia/backend/executor/__init__.py create mode 100644 src/mlia/backend/executor/application.py create mode 100644 src/mlia/backend/executor/common.py create mode 100644 src/mlia/backend/executor/config.py create mode 100644 src/mlia/backend/executor/execution.py create mode 100644 src/mlia/backend/executor/fs.py create mode 100644 src/mlia/backend/executor/output_consumer.py create mode 100644 src/mlia/backend/executor/proc.py create mode 100644 src/mlia/backend/executor/runner.py create mode 100644 src/mlia/backend/executor/source.py create mode 100644 src/mlia/backend/executor/system.py delete mode 100644 src/mlia/backend/fs.py create mode 100644 src/mlia/backend/install.py delete mode 100644 src/mlia/backend/output_consumer.py delete mode 100644 src/mlia/backend/proc.py delete mode 100644 src/mlia/backend/source.py delete mode 100644 src/mlia/backend/system.py create mode 100644 src/mlia/backend/tosa_checker/__init__.py create mode 100644 src/mlia/backend/tosa_checker/install.py create mode 100644 src/mlia/backend/vela/__init__.py create mode 100644 src/mlia/backend/vela/compat.py create mode 100644 src/mlia/backend/vela/compiler.py create mode 100644 
src/mlia/backend/vela/performance.py delete mode 100644 src/mlia/tools/__init__.py delete mode 100644 src/mlia/tools/metadata/__init__.py delete mode 100644 src/mlia/tools/metadata/common.py delete mode 100644 src/mlia/tools/metadata/corstone.py delete mode 100644 src/mlia/tools/metadata/py_package.py delete mode 100644 src/mlia/tools/vela_wrapper.py delete mode 100644 tests/test_backend_application.py delete mode 100644 tests/test_backend_common.py create mode 100644 tests/test_backend_corstone_install.py create mode 100644 tests/test_backend_corstone_performance.py delete mode 100644 tests/test_backend_execution.py create mode 100644 tests/test_backend_executor_application.py create mode 100644 tests/test_backend_executor_common.py create mode 100644 tests/test_backend_executor_execution.py create mode 100644 tests/test_backend_executor_fs.py create mode 100644 tests/test_backend_executor_output_consumer.py create mode 100644 tests/test_backend_executor_proc.py create mode 100644 tests/test_backend_executor_runner.py create mode 100644 tests/test_backend_executor_source.py create mode 100644 tests/test_backend_executor_system.py delete mode 100644 tests/test_backend_fs.py create mode 100644 tests/test_backend_install.py delete mode 100644 tests/test_backend_output_consumer.py delete mode 100644 tests/test_backend_proc.py delete mode 100644 tests/test_backend_source.py delete mode 100644 tests/test_backend_system.py create mode 100644 tests/test_backend_tosa_checker_install.py create mode 100644 tests/test_backend_vela_compat.py create mode 100644 tests/test_backend_vela_compiler.py create mode 100644 tests/test_backend_vela_performance.py delete mode 100644 tests/test_tools_metadata_common.py delete mode 100644 tests/test_tools_metadata_corstone.py delete mode 100644 tests/test_tools_metadata_py_package.py delete mode 100644 tests/test_tools_vela_wrapper.py diff --git a/src/mlia/backend/__init__.py b/src/mlia/backend/__init__.py index 3d60372..745aa1b 100644 --- 
a/src/mlia/backend/__init__.py +++ b/src/mlia/backend/__init__.py @@ -1,3 +1,3 @@ # SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. # SPDX-License-Identifier: Apache-2.0 -"""Backend module.""" +"""Backends module.""" diff --git a/src/mlia/backend/application.py b/src/mlia/backend/application.py deleted file mode 100644 index a5d99f7..0000000 --- a/src/mlia/backend/application.py +++ /dev/null @@ -1,170 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Application backend module.""" -from __future__ import annotations - -import re -from pathlib import Path -from typing import Any -from typing import cast -from typing import List - -from mlia.backend.common import Backend -from mlia.backend.common import ConfigurationException -from mlia.backend.common import get_backend_configs -from mlia.backend.common import get_backend_directories -from mlia.backend.common import load_application_configs -from mlia.backend.common import load_config -from mlia.backend.common import remove_backend -from mlia.backend.config import ApplicationConfig -from mlia.backend.config import ExtendedApplicationConfig -from mlia.backend.fs import get_backends_path -from mlia.backend.source import create_destination_and_install -from mlia.backend.source import get_source - - -def get_available_application_directory_names() -> list[str]: - """Return a list of directory names for all available applications.""" - return [entry.name for entry in get_backend_directories("applications")] - - -def get_available_applications() -> list[Application]: - """Return a list with all available applications.""" - available_applications = [] - for config_json in get_backend_configs("applications"): - config_entries = cast(List[ExtendedApplicationConfig], load_config(config_json)) - for config_entry in config_entries: - config_entry["config_location"] = config_json.parent.absolute() - applications = 
load_applications(config_entry) - available_applications += applications - - return sorted(available_applications, key=lambda application: application.name) - - -def get_application( - application_name: str, system_name: str | None = None -) -> list[Application]: - """Return a list of application instances with provided name.""" - return [ - application - for application in get_available_applications() - if application.name == application_name - and (not system_name or application.can_run_on(system_name)) - ] - - -def install_application(source_path: Path) -> None: - """Install application.""" - try: - source = get_source(source_path) - config = cast(List[ExtendedApplicationConfig], source.config()) - applications_to_install = [ - s for entry in config for s in load_applications(entry) - ] - except Exception as error: - raise ConfigurationException("Unable to read application definition") from error - - if not applications_to_install: - raise ConfigurationException("No application definition found") - - available_applications = get_available_applications() - already_installed = [ - s for s in applications_to_install if s in available_applications - ] - if already_installed: - names = {application.name for application in already_installed} - raise ConfigurationException( - f"Applications [{','.join(names)}] are already installed." 
- ) - - create_destination_and_install(source, get_backends_path("applications")) - - -def remove_application(directory_name: str) -> None: - """Remove application directory.""" - remove_backend(directory_name, "applications") - - -def get_unique_application_names(system_name: str | None = None) -> list[str]: - """Extract a list of unique application names of all application available.""" - return list( - { - application.name - for application in get_available_applications() - if not system_name or application.can_run_on(system_name) - } - ) - - -class Application(Backend): - """Class for representing a single application component.""" - - def __init__(self, config: ApplicationConfig) -> None: - """Construct a Application instance from a dict.""" - super().__init__(config) - - self.supported_systems = config.get("supported_systems", []) - - def __eq__(self, other: object) -> bool: - """Overload operator ==.""" - if not isinstance(other, Application): - return False - - return ( - super().__eq__(other) - and self.name == other.name - and set(self.supported_systems) == set(other.supported_systems) - ) - - def can_run_on(self, system_name: str) -> bool: - """Check if the application can run on the system passed as argument.""" - return system_name in self.supported_systems - - def get_details(self) -> dict[str, Any]: - """Return dictionary with information about the Application instance.""" - output = { - "type": "application", - "name": self.name, - "description": self.description, - "supported_systems": self.supported_systems, - "commands": self._get_command_details(), - } - - return output - - def remove_unused_params(self) -> None: - """Remove unused params in commands. - - After merging default and system related configuration application - could have parameters that are not being used in commands. They - should be removed. 
- """ - for command in self.commands.values(): - indexes_or_aliases = [ - m - for cmd_str in command.command_strings - for m in re.findall(r"{user_params:(?P\w+)}", cmd_str) - ] - - only_aliases = all(not item.isnumeric() for item in indexes_or_aliases) - if only_aliases: - used_params = [ - param - for param in command.params - if param.alias in indexes_or_aliases - ] - command.params = used_params - - -def load_applications(config: ExtendedApplicationConfig) -> list[Application]: - """Load application. - - Application configuration could contain different parameters/commands for different - supported systems. For each supported system this function will return separate - Application instance with appropriate configuration. - """ - configs = load_application_configs(config, ApplicationConfig) - applications = [Application(cfg) for cfg in configs] - for application in applications: - application.remove_unused_params() - return applications diff --git a/src/mlia/backend/common.py b/src/mlia/backend/common.py deleted file mode 100644 index 0f04553..0000000 --- a/src/mlia/backend/common.py +++ /dev/null @@ -1,517 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Contain all common functions for the backends.""" -from __future__ import annotations - -import json -import logging -import re -from abc import ABC -from collections import Counter -from pathlib import Path -from typing import Any -from typing import Callable -from typing import cast -from typing import Final -from typing import IO -from typing import Iterable -from typing import Match -from typing import NamedTuple -from typing import Pattern - -from mlia.backend.config import BackendConfig -from mlia.backend.config import BaseBackendConfig -from mlia.backend.config import NamedExecutionConfig -from mlia.backend.config import UserParamConfig -from mlia.backend.config import UserParamsConfig -from mlia.backend.fs import get_backends_path -from mlia.backend.fs import remove_resource -from mlia.backend.fs import ResourceType - - -BACKEND_CONFIG_FILE: Final[str] = "backend-config.json" - - -class ConfigurationException(Exception): - """Configuration exception.""" - - -def get_backend_config(dir_path: Path) -> Path: - """Get path to backendir configuration file.""" - return dir_path / BACKEND_CONFIG_FILE - - -def get_backend_configs(resource_type: ResourceType) -> Iterable[Path]: - """Get path to the backend configs for provided resource_type.""" - return ( - get_backend_config(entry) for entry in get_backend_directories(resource_type) - ) - - -def get_backend_directories(resource_type: ResourceType) -> Iterable[Path]: - """Get path to the backend directories for provided resource_type.""" - return ( - entry - for entry in get_backends_path(resource_type).iterdir() - if is_backend_directory(entry) - ) - - -def is_backend_directory(dir_path: Path) -> bool: - """Check if path is backend's configuration directory.""" - return dir_path.is_dir() and get_backend_config(dir_path).is_file() - - -def remove_backend(directory_name: str, resource_type: ResourceType) -> None: - """Remove backend with provided type and directory_name.""" 
- if not directory_name: - raise Exception("No directory name provided") - - remove_resource(directory_name, resource_type) - - -def load_config(config: Path | IO[bytes] | None) -> BackendConfig: - """Return a loaded json file.""" - if config is None: - raise Exception("Unable to read config") - - if isinstance(config, Path): - with config.open() as json_file: - return cast(BackendConfig, json.load(json_file)) - - return cast(BackendConfig, json.load(config)) - - -def parse_raw_parameter(parameter: str) -> tuple[str, str | None]: - """Split the parameter string in name and optional value. - - It manages the following cases: - --param=1 -> --param, 1 - --param 1 -> --param, 1 - --flag -> --flag, None - """ - data = re.split(" |=", parameter) - if len(data) == 1: - param_name = data[0] - param_value = None - else: - param_name = " ".join(data[0:-1]) - param_value = data[-1] - return param_name, param_value - - -class DataPaths(NamedTuple): - """DataPaths class.""" - - src: Path - dst: str - - -class Backend(ABC): - """Backend class.""" - - # pylint: disable=too-many-instance-attributes - - def __init__(self, config: BaseBackendConfig): - """Initialize backend.""" - name = config.get("name") - if not name: - raise ConfigurationException("Name is empty") - - self.name = name - self.description = config.get("description", "") - self.config_location = config.get("config_location") - self.variables = config.get("variables", {}) - self.annotations = config.get("annotations", {}) - - self._parse_commands_and_params(config) - - def validate_parameter(self, command_name: str, parameter: str) -> bool: - """Validate the parameter string against the application configuration. - - We take the parameter string, extract the parameter name/value and - check them against the current configuration. 
- """ - param_name, param_value = parse_raw_parameter(parameter) - valid_param_name = valid_param_value = False - - command = self.commands.get(command_name) - if not command: - raise AttributeError(f"Unknown command: '{command_name}'") - - # Iterate over all available parameters until we have a match. - for param in command.params: - if self._same_parameter(param_name, param): - valid_param_name = True - # This is a non-empty list - if param.values: - # We check if the value is allowed in the configuration - valid_param_value = param_value in param.values - else: - # In this case we don't validate the value and accept - # whatever we have set. - valid_param_value = True - break - - return valid_param_name and valid_param_value - - def __eq__(self, other: object) -> bool: - """Overload operator ==.""" - if not isinstance(other, Backend): - return False - - return ( - self.name == other.name - and self.description == other.description - and self.commands == other.commands - ) - - def __repr__(self) -> str: - """Represent the Backend instance by its name.""" - return self.name - - def _parse_commands_and_params(self, config: BaseBackendConfig) -> None: - """Parse commands and user parameters.""" - self.commands: dict[str, Command] = {} - - commands = config.get("commands") - if commands: - params = config.get("user_params") - - for command_name in commands.keys(): - command_params = self._parse_params(params, command_name) - command_strings = [ - self._substitute_variables(cmd) - for cmd in commands.get(command_name, []) - ] - self.commands[command_name] = Command(command_strings, command_params) - - def _substitute_variables(self, str_val: str) -> str: - """Substitute variables in string. - - Variables is being substituted at backend's creation stage because - they could contain references to other params which will be - resolved later. 
- """ - if not str_val: - return str_val - - var_pattern: Final[Pattern] = re.compile(r"{variables:(?P\w+)}") - - def var_value(match: Match) -> str: - var_name = match["var_name"] - if var_name not in self.variables: - raise ConfigurationException(f"Unknown variable {var_name}") - - return self.variables[var_name] - - return var_pattern.sub(var_value, str_val) - - @classmethod - def _parse_params( - cls, params: UserParamsConfig | None, command: str - ) -> list[Param]: - if not params: - return [] - - return [cls._parse_param(p) for p in params.get(command, [])] - - @classmethod - def _parse_param(cls, param: UserParamConfig) -> Param: - """Parse a single parameter.""" - name = param.get("name") - if name is not None and not name: - raise ConfigurationException("Parameter has an empty 'name' attribute.") - values = param.get("values", None) - default_value = param.get("default_value", None) - description = param.get("description", "") - alias = param.get("alias") - - return Param( - name=name, - description=description, - values=values, - default_value=default_value, - alias=alias, - ) - - def _get_command_details(self) -> dict: - command_details = { - command_name: command.get_details() - for command_name, command in self.commands.items() - } - return command_details - - def _get_user_param_value(self, user_params: list[str], param: Param) -> str | None: - """Get the user-specified value of a parameter.""" - for user_param in user_params: - user_param_name, user_param_value = parse_raw_parameter(user_param) - if user_param_name == param.name: - warn_message = ( - "The direct use of parameter name is deprecated" - " and might be removed in the future.\n" - f"Please use alias '{param.alias}' instead of " - "'{user_param_name}' to provide the parameter." 
- ) - logging.warning(warn_message) - - if self._same_parameter(user_param_name, param): - return user_param_value - - return None - - @staticmethod - def _same_parameter(user_param_name_or_alias: str, param: Param) -> bool: - """Compare user parameter name with param name or alias.""" - # Strip the "=" sign in the param_name. This is needed just for - # comparison with the parameters passed by the user. - # The equal sign needs to be honoured when re-building the - # parameter back. - param_name = None if not param.name else param.name.rstrip("=") - return user_param_name_or_alias in [param_name, param.alias] - - def resolved_parameters( - self, command_name: str, user_params: list[str] - ) -> list[tuple[str | None, Param]]: - """Return list of parameters with values.""" - result: list[tuple[str | None, Param]] = [] - command = self.commands.get(command_name) - if not command: - return result - - for param in command.params: - value = self._get_user_param_value(user_params, param) - if not value: - value = param.default_value - result.append((value, param)) - - return result - - def build_command( - self, - command_name: str, - user_params: list[str], - param_resolver: Callable[[str, str, list[tuple[str | None, Param]]], str], - ) -> list[str]: - """ - Return a list of executable command strings. - - Given a command and associated parameters, returns a list of executable command - strings. - """ - command = self.commands.get(command_name) - if not command: - raise ConfigurationException( - f"Command '{command_name}' could not be found." 
- ) - - commands_to_run = [] - - params_values = self.resolved_parameters(command_name, user_params) - for cmd_str in command.command_strings: - cmd_str = resolve_all_parameters( - cmd_str, param_resolver, command_name, params_values - ) - commands_to_run.append(cmd_str) - - return commands_to_run - - -class Param: - """Class for representing a generic application parameter.""" - - def __init__( # pylint: disable=too-many-arguments - self, - name: str | None, - description: str, - values: list[str] | None = None, - default_value: str | None = None, - alias: str | None = None, - ) -> None: - """Construct a Param instance.""" - if not name and not alias: - raise ConfigurationException( - "Either name, alias or both must be set to identify a parameter." - ) - self.name = name - self.values = values - self.description = description - self.default_value = default_value - self.alias = alias - - def get_details(self) -> dict: - """Return a dictionary with all relevant information of a Param.""" - return {key: value for key, value in self.__dict__.items() if value} - - def __eq__(self, other: object) -> bool: - """Overload operator ==.""" - if not isinstance(other, Param): - return False - - return ( - self.name == other.name - and self.values == other.values - and self.default_value == other.default_value - and self.description == other.description - ) - - -class Command: - """Class for representing a command.""" - - def __init__( - self, command_strings: list[str], params: list[Param] | None = None - ) -> None: - """Construct a Command instance.""" - self.command_strings = command_strings - - if params: - self.params = params - else: - self.params = [] - - self._validate() - - def _validate(self) -> None: - """Validate command.""" - if not self.params: - return - - aliases = [param.alias for param in self.params if param.alias is not None] - repeated_aliases = [ - alias for alias, count in Counter(aliases).items() if count > 1 - ] - - if repeated_aliases: - raise 
ConfigurationException( - f"Non-unique aliases {', '.join(repeated_aliases)}" - ) - - both_name_and_alias = [ - param.name - for param in self.params - if param.name in aliases and param.name != param.alias - ] - if both_name_and_alias: - raise ConfigurationException( - f"Aliases {', '.join(both_name_and_alias)} could not be used " - "as parameter name." - ) - - def get_details(self) -> dict: - """Return a dictionary with all relevant information of a Command.""" - output = { - "command_strings": self.command_strings, - "user_params": [param.get_details() for param in self.params], - } - return output - - def __eq__(self, other: object) -> bool: - """Overload operator ==.""" - if not isinstance(other, Command): - return False - - return ( - self.command_strings == other.command_strings - and self.params == other.params - ) - - -def resolve_all_parameters( - str_val: str, - param_resolver: Callable[[str, str, list[tuple[str | None, Param]]], str], - command_name: str | None = None, - params_values: list[tuple[str | None, Param]] | None = None, -) -> str: - """Resolve all parameters in the string.""" - if not str_val: - return str_val - - param_pattern: Final[Pattern] = re.compile(r"{(?P[\w.:]+)}") - while param_pattern.findall(str_val): - str_val = param_pattern.sub( - lambda m: param_resolver( - m["param_name"], command_name or "", params_values or [] - ), - str_val, - ) - return str_val - - -def load_application_configs( - config: Any, - config_type: type[Any], - is_system_required: bool = True, -) -> Any: - """Get one config for each system supported by the application. - - The configuration could contain different parameters/commands for different - supported systems. For each supported system this function will return separate - config with appropriate configuration. 
- """ - merged_configs = [] - supported_systems: list[NamedExecutionConfig] | None = config.get( - "supported_systems" - ) - if not supported_systems: - if is_system_required: - raise ConfigurationException("No supported systems definition provided") - # Create an empty system to be used in the parsing below - supported_systems = [cast(NamedExecutionConfig, {})] - - default_user_params = config.get("user_params", {}) - - def merge_config(system: NamedExecutionConfig) -> Any: - system_name = system.get("name") - if not system_name and is_system_required: - raise ConfigurationException( - "Unable to read supported system definition, name is missed" - ) - - merged_config = config_type(**config) - merged_config["supported_systems"] = [system_name] if system_name else [] - # merge default configuration and specific to the system - merged_config["commands"] = { - **config.get("commands", {}), - **system.get("commands", {}), - } - - params = {} - tool_user_params = system.get("user_params", {}) - command_names = tool_user_params.keys() | default_user_params.keys() - for command_name in command_names: - if command_name not in merged_config["commands"]: - continue - - params_default = default_user_params.get(command_name, []) - params_tool = tool_user_params.get(command_name, []) - if not params_default or not params_tool: - params[command_name] = params_tool or params_default - if params_default and params_tool: - if any(not p.get("alias") for p in params_default): - raise ConfigurationException( - f"Default parameters for command {command_name} " - "should have aliases" - ) - if any(not p.get("alias") for p in params_tool): - raise ConfigurationException( - f"{system_name} parameters for command {command_name} " - "should have aliases." 
- ) - - merged_by_alias = { - **{p.get("alias"): p for p in params_default}, - **{p.get("alias"): p for p in params_tool}, - } - params[command_name] = list(merged_by_alias.values()) - - merged_config["user_params"] = params - merged_config["variables"] = { - **config.get("variables", {}), - **system.get("variables", {}), - } - return merged_config - - merged_configs = [merge_config(system) for system in supported_systems] - - return merged_configs diff --git a/src/mlia/backend/config.py b/src/mlia/backend/config.py deleted file mode 100644 index dca53da..0000000 --- a/src/mlia/backend/config.py +++ /dev/null @@ -1,68 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Contain definition of backend configuration.""" -from __future__ import annotations - -from pathlib import Path -from typing import Dict -from typing import List -from typing import TypedDict -from typing import Union - - -class UserParamConfig(TypedDict, total=False): - """User parameter configuration.""" - - name: str | None - default_value: str - values: list[str] - description: str - alias: str - - -UserParamsConfig = Dict[str, List[UserParamConfig]] - - -class ExecutionConfig(TypedDict, total=False): - """Execution configuration.""" - - commands: dict[str, list[str]] - user_params: UserParamsConfig - variables: dict[str, str] - - -class NamedExecutionConfig(ExecutionConfig): - """Execution configuration with name.""" - - name: str - - -class BaseBackendConfig(ExecutionConfig, total=False): - """Base backend configuration.""" - - name: str - description: str - config_location: Path - annotations: dict[str, str | list[str]] - - -class ApplicationConfig(BaseBackendConfig, total=False): - """Application configuration.""" - - supported_systems: list[str] - - -class ExtendedApplicationConfig(BaseBackendConfig, total=False): - """Extended application configuration.""" - - supported_systems: list[NamedExecutionConfig] - - 
-class SystemConfig(BaseBackendConfig, total=False): - """System configuration.""" - - reporting: dict[str, dict] - - -BackendItemConfig = Union[ApplicationConfig, SystemConfig] -BackendConfig = Union[List[ExtendedApplicationConfig], List[SystemConfig]] diff --git a/src/mlia/backend/corstone/__init__.py b/src/mlia/backend/corstone/__init__.py new file mode 100644 index 0000000..a1eac14 --- /dev/null +++ b/src/mlia/backend/corstone/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Corstone backend module.""" diff --git a/src/mlia/backend/corstone/install.py b/src/mlia/backend/corstone/install.py new file mode 100644 index 0000000..2a0e5c9 --- /dev/null +++ b/src/mlia/backend/corstone/install.py @@ -0,0 +1,155 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Module for Corstone based FVPs. + +The import of subprocess module raises a B404 bandit error. MLIA usage of +subprocess is needed and can be considered safe hence disabling the security +check. 
+""" +from __future__ import annotations + +import logging +import subprocess # nosec +from pathlib import Path + +from mlia.backend.executor.runner import BackendRunner +from mlia.backend.install import BackendInstallation +from mlia.backend.install import BackendMetadata +from mlia.backend.install import CompoundPathChecker +from mlia.backend.install import Installation +from mlia.backend.install import PackagePathChecker +from mlia.backend.install import StaticPathChecker +from mlia.utils.download import DownloadArtifact +from mlia.utils.filesystem import working_directory + + +logger = logging.getLogger(__name__) + + +class Corstone300Installer: + """Helper class that wraps Corstone 300 installation logic.""" + + def __call__(self, eula_agreement: bool, dist_dir: Path) -> Path: + """Install Corstone-300 and return path to the models.""" + with working_directory(dist_dir): + install_dir = "corstone-300" + try: + fvp_install_cmd = [ + "./FVP_Corstone_SSE-300.sh", + "-q", + "-d", + install_dir, + ] + if not eula_agreement: + fvp_install_cmd += [ + "--nointeractive", + "--i-agree-to-the-contained-eula", + ] + + # The following line raises a B603 error for bandit. 
In this + # specific case, the input is pretty much static and cannot be + # changed by the user hence disabling the security check for + # this instance + subprocess.check_call(fvp_install_cmd) # nosec + except subprocess.CalledProcessError as err: + raise Exception( + "Error occurred during Corstone-300 installation" + ) from err + + return dist_dir / install_dir + + +def get_corstone_300_installation() -> Installation: + """Get Corstone-300 installation.""" + corstone_300 = BackendInstallation( + backend_runner=BackendRunner(), + # pylint: disable=line-too-long + metadata=BackendMetadata( + name="Corstone-300", + description="Corstone-300 FVP", + system_config="backend_configs/systems/corstone-300/backend-config.json", + apps_resources=[], + fvp_dir_name="corstone_300", + download_artifact=DownloadArtifact( + name="Corstone-300 FVP", + url="https://developer.arm.com/-/media/Arm%20Developer%20Community/Downloads/OSS/FVP/Corstone-300/FVP_Corstone_SSE-300_11.16_26.tgz", + filename="FVP_Corstone_SSE-300_11.16_26.tgz", + version="11.16_26", + sha256_hash="e26139be756b5003a30d978c629de638aed1934d597dc24a17043d4708e934d7", + ), + supported_platforms=["Linux"], + ), + # pylint: enable=line-too-long + path_checker=CompoundPathChecker( + PackagePathChecker( + expected_files=[ + "models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55", + "models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U65", + ], + backend_subfolder="models/Linux64_GCC-6.4", + ), + StaticPathChecker( + static_backend_path=Path("/opt/VHT"), + expected_files=[ + "VHT_Corstone_SSE-300_Ethos-U55", + "VHT_Corstone_SSE-300_Ethos-U65", + ], + copy_source=False, + system_config=( + "backend_configs/systems/corstone-300-vht/backend-config.json" + ), + ), + ), + backend_installer=Corstone300Installer(), + ) + + return corstone_300 + + +def get_corstone_310_installation() -> Installation: + """Get Corstone-310 installation.""" + corstone_310 = BackendInstallation( + backend_runner=BackendRunner(), + # pylint:
disable=line-too-long + metadata=BackendMetadata( + name="Corstone-310", + description="Corstone-310 FVP", + system_config="backend_configs/systems/corstone-310/backend-config.json", + apps_resources=[], + fvp_dir_name="corstone_310", + download_artifact=None, + supported_platforms=["Linux"], + ), + # pylint: enable=line-too-long + path_checker=CompoundPathChecker( + PackagePathChecker( + expected_files=[ + "models/Linux64_GCC-9.3/FVP_Corstone_SSE-310", + "models/Linux64_GCC-9.3/FVP_Corstone_SSE-310_Ethos-U65", + ], + backend_subfolder="models/Linux64_GCC-9.3", + ), + StaticPathChecker( + static_backend_path=Path("/opt/VHT"), + expected_files=[ + "VHT_Corstone_SSE-310", + "VHT_Corstone_SSE-310_Ethos-U65", + ], + copy_source=False, + system_config=( + "backend_configs/systems/corstone-310-vht/backend-config.json" + ), + ), + ), + backend_installer=None, + ) + + return corstone_310 + + +def get_corstone_installations() -> list[Installation]: + """Get Corstone installations.""" + return [ + get_corstone_300_installation(), + get_corstone_310_installation(), + ] diff --git a/src/mlia/backend/corstone/performance.py b/src/mlia/backend/corstone/performance.py new file mode 100644 index 0000000..5aabfa5 --- /dev/null +++ b/src/mlia/backend/corstone/performance.py @@ -0,0 +1,233 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Module for backend integration.""" +from __future__ import annotations + +import logging +from abc import ABC +from abc import abstractmethod +from dataclasses import dataclass +from pathlib import Path +from typing import Literal + +from mlia.backend.executor.output_consumer import Base64OutputConsumer +from mlia.backend.executor.output_consumer import OutputConsumer +from mlia.backend.executor.runner import BackendRunner +from mlia.backend.executor.runner import ExecutionParams +from mlia.backend.install import get_application_name +from mlia.backend.install import get_system_name + + +logger = logging.getLogger(__name__) + + +@dataclass +class DeviceInfo: + """Device information.""" + + device_type: Literal["ethos-u55", "ethos-u65"] + mac: int + + +@dataclass +class ModelInfo: + """Model info.""" + + model_path: Path + + +@dataclass +class PerformanceMetrics: + """Performance metrics parsed from generic inference output.""" + + npu_active_cycles: int + npu_idle_cycles: int + npu_total_cycles: int + npu_axi0_rd_data_beat_received: int + npu_axi0_wr_data_beat_written: int + npu_axi1_rd_data_beat_received: int + + +class LogWriter(OutputConsumer): + """Redirect output to the logger.""" + + def feed(self, line: str) -> bool: + """Process line from the output.""" + logger.debug(line.strip()) + return False + + +class GenericInferenceOutputParser(Base64OutputConsumer): + """Generic inference app output parser.""" + + def __init__(self) -> None: + """Init generic inference output parser instance.""" + super().__init__() + self._map = { + "NPU ACTIVE": "npu_active_cycles", + "NPU IDLE": "npu_idle_cycles", + "NPU TOTAL": "npu_total_cycles", + "NPU AXI0_RD_DATA_BEAT_RECEIVED": "npu_axi0_rd_data_beat_received", + "NPU AXI0_WR_DATA_BEAT_WRITTEN": "npu_axi0_wr_data_beat_written", + "NPU AXI1_RD_DATA_BEAT_RECEIVED": "npu_axi1_rd_data_beat_received", + } + + @property + def result(self) -> dict: + """Merge the raw results and map the 
names to the right output names.""" + merged_result = {} + for raw_result in self.parsed_output: + for profiling_result in raw_result: + for sample in profiling_result["samples"]: + name, values = (sample["name"], sample["value"]) + if name in merged_result: + raise KeyError( + f"Duplicate key '{name}' in base64 output.", + ) + new_name = self._map[name] + merged_result[new_name] = values[0] + return merged_result + + def is_ready(self) -> bool: + """Return true if all expected data has been parsed.""" + return set(self.result.keys()) == set(self._map.values()) + + def missed_keys(self) -> set[str]: + """Return a set of the keys that have not been found in the output.""" + return set(self._map.values()) - set(self.result.keys()) + + +class GenericInferenceRunner(ABC): + """Abstract class for generic inference runner.""" + + def __init__(self, backend_runner: BackendRunner): + """Init generic inference runner instance.""" + self.backend_runner = backend_runner + + def run( + self, model_info: ModelInfo, output_consumers: list[OutputConsumer] + ) -> None: + """Run generic inference for the provided device/model.""" + execution_params = self.get_execution_params(model_info) + + ctx = self.backend_runner.run_application(execution_params) + if ctx.stdout is not None: + ctx.stdout = self.consume_output(ctx.stdout, output_consumers) + + @abstractmethod + def get_execution_params(self, model_info: ModelInfo) -> ExecutionParams: + """Get execution params for the provided model.""" + + def check_system_and_application(self, system_name: str, app_name: str) -> None: + """Check if requested system and application installed.""" + if not self.backend_runner.is_system_installed(system_name): + raise Exception(f"System {system_name} is not installed") + + if not self.backend_runner.is_application_installed(app_name, system_name): + raise Exception( + f"Application {app_name} for the system {system_name} " + "is not installed" + ) + + @staticmethod + def consume_output(output: 
bytearray, consumers: list[OutputConsumer]) -> bytearray: + """ + Pass program's output to the consumers and filter it. + + Returns the filtered output. + """ + filtered_output = bytearray() + for line_bytes in output.splitlines(): + line = line_bytes.decode("utf-8") + remove_line = False + for consumer in consumers: + if consumer.feed(line): + remove_line = True + if not remove_line: + filtered_output.extend(line_bytes) + + return filtered_output + + +class GenericInferenceRunnerEthosU(GenericInferenceRunner): + """Generic inference runner on U55/65.""" + + def __init__( + self, backend_runner: BackendRunner, device_info: DeviceInfo, backend: str + ) -> None: + """Init generic inference runner instance.""" + super().__init__(backend_runner) + + system_name, app_name = self.resolve_system_and_app(device_info, backend) + self.system_name = system_name + self.app_name = app_name + self.device_info = device_info + + @staticmethod + def resolve_system_and_app( + device_info: DeviceInfo, backend: str + ) -> tuple[str, str]: + """Find appropriate system and application for the provided device/backend.""" + try: + system_name = get_system_name(backend, device_info.device_type) + except KeyError as ex: + raise RuntimeError( + f"Unsupported device {device_info.device_type} " + f"for backend {backend}" + ) from ex + + try: + app_name = get_application_name(system_name) + except KeyError as err: + raise RuntimeError(f"System {system_name} is not installed") from err + + return system_name, app_name + + def get_execution_params(self, model_info: ModelInfo) -> ExecutionParams: + """Get execution params for Ethos-U55/65.""" + self.check_system_and_application(self.system_name, self.app_name) + + system_params = [ + f"mac={self.device_info.mac}", + f"input_file={model_info.model_path.absolute()}", + ] + + return ExecutionParams( + self.app_name, + self.system_name, + [], + system_params, + ) + + +def get_generic_runner(device_info: DeviceInfo, backend: str) -> 
GenericInferenceRunner: + """Get generic runner for provided device and backend.""" + backend_runner = get_backend_runner() + return GenericInferenceRunnerEthosU(backend_runner, device_info, backend) + + +def estimate_performance( + model_info: ModelInfo, device_info: DeviceInfo, backend: str +) -> PerformanceMetrics: + """Get performance estimations.""" + output_parser = GenericInferenceOutputParser() + output_consumers = [output_parser, LogWriter()] + + generic_runner = get_generic_runner(device_info, backend) + generic_runner.run(model_info, output_consumers) + + if not output_parser.is_ready(): + missed_data = ",".join(output_parser.missed_keys()) + logger.debug("Unable to get performance metrics, missed data %s", missed_data) + raise Exception("Unable to get performance metrics, insufficient data") + + return PerformanceMetrics(**output_parser.result) + + +def get_backend_runner() -> BackendRunner: + """ + Return BackendRunner instance. + + Note: This is needed for the unit tests. + """ + return BackendRunner() diff --git a/src/mlia/backend/execution.py b/src/mlia/backend/execution.py deleted file mode 100644 index 5c8e53f..0000000 --- a/src/mlia/backend/execution.py +++ /dev/null @@ -1,342 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Application execution module.""" -from __future__ import annotations - -import logging -import re -from typing import cast - -from mlia.backend.application import Application -from mlia.backend.application import get_application -from mlia.backend.common import Backend -from mlia.backend.common import ConfigurationException -from mlia.backend.common import Param -from mlia.backend.system import get_system -from mlia.backend.system import System - -logger = logging.getLogger(__name__) - - -class AnotherInstanceIsRunningException(Exception): - """Concurrent execution error.""" - - -class ExecutionContext: # pylint: disable=too-few-public-methods - """Command execution context.""" - - def __init__( - self, - app: Application, - app_params: list[str], - system: System, - system_params: list[str], - ): - """Init execution context.""" - self.app = app - self.app_params = app_params - self.system = system - self.system_params = system_params - - self.param_resolver = ParamResolver(self) - - self.stdout: bytearray | None = None - self.stderr: bytearray | None = None - - -class ParamResolver: - """Parameter resolver.""" - - def __init__(self, context: ExecutionContext): - """Init parameter resolver.""" - self.ctx = context - - @staticmethod - def resolve_user_params( - cmd_name: str | None, - index_or_alias: str, - resolved_params: list[tuple[str | None, Param]] | None, - ) -> str: - """Resolve user params.""" - if not cmd_name or resolved_params is None: - raise ConfigurationException("Unable to resolve user params") - - param_value: str | None = None - param: Param | None = None - - if index_or_alias.isnumeric(): - i = int(index_or_alias) - if i not in range(len(resolved_params)): - raise ConfigurationException( - f"Invalid index {i} for user params of command {cmd_name}" - ) - param_value, param = resolved_params[i] - else: - for val, par in resolved_params: - if par.alias == index_or_alias: - param_value, param = val, par - 
break - - if param is None: - raise ConfigurationException( - f"No user parameter for command '{cmd_name}' with " - f"alias '{index_or_alias}'." - ) - - if param_value: - # We need to handle to cases of parameters here: - # 1) Optional parameters (non-positional with a name and value) - # 2) Positional parameters (value only, no name needed) - # Default to empty strings for positional arguments - param_name = "" - separator = "" - if param.name is not None: - # A valid param name means we have an optional/non-positional argument: - # The separator is an empty string in case the param_name - # has an equal sign as we have to honour it. - # If the parameter doesn't end with an equal sign then a - # space character is injected to split the parameter name - # and its value - param_name = param.name - separator = "" if param.name.endswith("=") else " " - - return f"{param_name}{separator}{param_value}" - - if param.name is None: - raise ConfigurationException( - f"Missing user parameter with alias '{index_or_alias}' for " - f"command '{cmd_name}'." 
- ) - - return param.name # flag: just return the parameter name - - def resolve_commands_and_params( - self, backend_type: str, cmd_name: str, return_params: bool, index_or_alias: str - ) -> str: - """Resolve command or command's param value.""" - if backend_type == "system": - backend = cast(Backend, self.ctx.system) - backend_params = self.ctx.system_params - else: # Application backend - backend = cast(Backend, self.ctx.app) - backend_params = self.ctx.app_params - - if cmd_name not in backend.commands: - raise ConfigurationException(f"Command {cmd_name} not found") - - if return_params: - params = backend.resolved_parameters(cmd_name, backend_params) - if index_or_alias.isnumeric(): - i = int(index_or_alias) - if i not in range(len(params)): - raise ConfigurationException( - f"Invalid parameter index {i} for command {cmd_name}" - ) - - param_value = params[i][0] - else: - param_value = None - for value, param in params: - if param.alias == index_or_alias: - param_value = value - break - - if not param_value: - raise ConfigurationException( - "No value for parameter with index or " - f"alias {index_or_alias} of command {cmd_name}." 
- ) - return param_value - - if not index_or_alias.isnumeric(): - raise ConfigurationException(f"Bad command index {index_or_alias}") - - i = int(index_or_alias) - commands = backend.build_command(cmd_name, backend_params, self.param_resolver) - if i not in range(len(commands)): - raise ConfigurationException(f"Invalid index {i} for command {cmd_name}") - - return commands[i] - - def resolve_variables(self, backend_type: str, var_name: str) -> str: - """Resolve variable value.""" - if backend_type == "system": - backend = cast(Backend, self.ctx.system) - else: # Application backend - backend = cast(Backend, self.ctx.app) - - if var_name not in backend.variables: - raise ConfigurationException(f"Unknown variable {var_name}") - - return backend.variables[var_name] - - def param_matcher( - self, - param_name: str, - cmd_name: str | None, - resolved_params: list[tuple[str | None, Param]] | None, - ) -> str: - """Regexp to resolve a param from the param_name.""" - # this pattern supports parameter names like "application.commands.run:0" and - # "system.commands.run.params:0" - # Note: 'software' is included for backward compatibility. - commands_and_params_match = re.match( - r"(?Papplication|software|system)[.]commands[.]" - r"(?P\w+)" - r"(?P[.]params|)[:]" - r"(?P\w+)", - param_name, - ) - - if commands_and_params_match: - backend_type, cmd_name, return_params, index_or_alias = ( - commands_and_params_match["type"], - commands_and_params_match["name"], - commands_and_params_match["params"], - commands_and_params_match["index_or_alias"], - ) - return self.resolve_commands_and_params( - backend_type, cmd_name, bool(return_params), index_or_alias - ) - - # Note: 'software' is included for backward compatibility. 
- variables_match = re.match( - r"(?Papplication|software|system)[.]variables:(?P\w+)", - param_name, - ) - if variables_match: - backend_type, var_name = ( - variables_match["type"], - variables_match["var_name"], - ) - return self.resolve_variables(backend_type, var_name) - - user_params_match = re.match(r"user_params:(?P\w+)", param_name) - if user_params_match: - index_or_alias = user_params_match["index_or_alias"] - return self.resolve_user_params(cmd_name, index_or_alias, resolved_params) - - raise ConfigurationException(f"Unable to resolve parameter {param_name}") - - def param_resolver( - self, - param_name: str, - cmd_name: str | None = None, - resolved_params: list[tuple[str | None, Param]] | None = None, - ) -> str: - """Resolve parameter value based on current execution context.""" - # Note: 'software.*' is included for backward compatibility. - resolved_param = None - if param_name in ["application.name", "software.name"]: - resolved_param = self.ctx.app.name - elif param_name in ["application.description", "software.description"]: - resolved_param = self.ctx.app.description - elif self.ctx.app.config_location and ( - param_name in ["application.config_dir", "software.config_dir"] - ): - resolved_param = str(self.ctx.app.config_location.absolute()) - elif self.ctx.system is not None: - if param_name == "system.name": - resolved_param = self.ctx.system.name - elif param_name == "system.description": - resolved_param = self.ctx.system.description - elif param_name == "system.config_dir" and self.ctx.system.config_location: - resolved_param = str(self.ctx.system.config_location.absolute()) - - if not resolved_param: - resolved_param = self.param_matcher(param_name, cmd_name, resolved_params) - return resolved_param - - def __call__( - self, - param_name: str, - cmd_name: str | None = None, - resolved_params: list[tuple[str | None, Param]] | None = None, - ) -> str: - """Resolve provided parameter.""" - return self.param_resolver(param_name, cmd_name, 
resolved_params) - - -def validate_parameters( - backend: Backend, command_names: list[str], params: list[str] -) -> None: - """Check parameters passed to backend.""" - for param in params: - acceptable = any( - backend.validate_parameter(command_name, param) - for command_name in command_names - if command_name in backend.commands - ) - - if not acceptable: - backend_type = "System" if isinstance(backend, System) else "Application" - raise ValueError( - f"{backend_type} parameter '{param}' not valid for " - f"command '{' or '.join(command_names)}'." - ) - - -def get_application_by_name_and_system( - application_name: str, system_name: str -) -> Application: - """Get application.""" - applications = get_application(application_name, system_name) - if not applications: - raise ValueError( - f"Application '{application_name}' doesn't support the " - f"system '{system_name}'." - ) - - if len(applications) != 1: - raise ValueError( - f"Error during getting application {application_name} for the " - f"system {system_name}." 
- ) - - return applications[0] - - -def get_application_and_system( - application_name: str, system_name: str -) -> tuple[Application, System]: - """Return application and system by provided names.""" - system = get_system(system_name) - if not system: - raise ValueError(f"System {system_name} is not found.") - - application = get_application_by_name_and_system(application_name, system_name) - - return application, system - - -def run_application( - application_name: str, - application_params: list[str], - system_name: str, - system_params: list[str], -) -> ExecutionContext: - """Run application on the provided system.""" - application, system = get_application_and_system(application_name, system_name) - validate_parameters(application, ["run"], application_params) - validate_parameters(system, ["run"], system_params) - - ctx = ExecutionContext( - app=application, - app_params=application_params, - system=system, - system_params=system_params, - ) - - logger.debug("Generating commands to execute") - commands_to_run = ctx.system.build_command( - "run", ctx.system_params, ctx.param_resolver - ) - - for command in commands_to_run: - logger.debug("Running: %s", command) - exit_code, ctx.stdout, ctx.stderr = ctx.system.run(command) - - if exit_code != 0: - logger.warning("Application exited with exit code %i", exit_code) - - return ctx diff --git a/src/mlia/backend/executor/__init__.py b/src/mlia/backend/executor/__init__.py new file mode 100644 index 0000000..3d60372 --- /dev/null +++ b/src/mlia/backend/executor/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Backend module.""" diff --git a/src/mlia/backend/executor/application.py b/src/mlia/backend/executor/application.py new file mode 100644 index 0000000..738ac4e --- /dev/null +++ b/src/mlia/backend/executor/application.py @@ -0,0 +1,170 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Application backend module.""" +from __future__ import annotations + +import re +from pathlib import Path +from typing import Any +from typing import cast +from typing import List + +from mlia.backend.executor.common import Backend +from mlia.backend.executor.common import ConfigurationException +from mlia.backend.executor.common import get_backend_configs +from mlia.backend.executor.common import get_backend_directories +from mlia.backend.executor.common import load_application_configs +from mlia.backend.executor.common import load_config +from mlia.backend.executor.common import remove_backend +from mlia.backend.executor.config import ApplicationConfig +from mlia.backend.executor.config import ExtendedApplicationConfig +from mlia.backend.executor.fs import get_backends_path +from mlia.backend.executor.source import create_destination_and_install +from mlia.backend.executor.source import get_source + + +def get_available_application_directory_names() -> list[str]: + """Return a list of directory names for all available applications.""" + return [entry.name for entry in get_backend_directories("applications")] + + +def get_available_applications() -> list[Application]: + """Return a list with all available applications.""" + available_applications = [] + for config_json in get_backend_configs("applications"): + config_entries = cast(List[ExtendedApplicationConfig], load_config(config_json)) + for config_entry in config_entries: + config_entry["config_location"] = config_json.parent.absolute() + applications = load_applications(config_entry) + available_applications += applications + + return sorted(available_applications, key=lambda application: application.name) + + +def get_application( + application_name: str, system_name: str | None = None +) -> list[Application]: + """Return a list of application instances with provided name.""" + return [ + application + for application in get_available_applications() + if 
application.name == application_name + and (not system_name or application.can_run_on(system_name)) + ] + + +def install_application(source_path: Path) -> None: + """Install application.""" + try: + source = get_source(source_path) + config = cast(List[ExtendedApplicationConfig], source.config()) + applications_to_install = [ + s for entry in config for s in load_applications(entry) + ] + except Exception as error: + raise ConfigurationException("Unable to read application definition") from error + + if not applications_to_install: + raise ConfigurationException("No application definition found") + + available_applications = get_available_applications() + already_installed = [ + s for s in applications_to_install if s in available_applications + ] + if already_installed: + names = {application.name for application in already_installed} + raise ConfigurationException( + f"Applications [{','.join(names)}] are already installed." + ) + + create_destination_and_install(source, get_backends_path("applications")) + + +def remove_application(directory_name: str) -> None: + """Remove application directory.""" + remove_backend(directory_name, "applications") + + +def get_unique_application_names(system_name: str | None = None) -> list[str]: + """Extract a list of unique application names of all application available.""" + return list( + { + application.name + for application in get_available_applications() + if not system_name or application.can_run_on(system_name) + } + ) + + +class Application(Backend): + """Class for representing a single application component.""" + + def __init__(self, config: ApplicationConfig) -> None: + """Construct a Application instance from a dict.""" + super().__init__(config) + + self.supported_systems = config.get("supported_systems", []) + + def __eq__(self, other: object) -> bool: + """Overload operator ==.""" + if not isinstance(other, Application): + return False + + return ( + super().__eq__(other) + and self.name == other.name + and 
set(self.supported_systems) == set(other.supported_systems) + ) + + def can_run_on(self, system_name: str) -> bool: + """Check if the application can run on the system passed as argument.""" + return system_name in self.supported_systems + + def get_details(self) -> dict[str, Any]: + """Return dictionary with information about the Application instance.""" + output = { + "type": "application", + "name": self.name, + "description": self.description, + "supported_systems": self.supported_systems, + "commands": self._get_command_details(), + } + + return output + + def remove_unused_params(self) -> None: + """Remove unused params in commands. + + After merging default and system related configuration application + could have parameters that are not being used in commands. They + should be removed. + """ + for command in self.commands.values(): + indexes_or_aliases = [ + m + for cmd_str in command.command_strings + for m in re.findall(r"{user_params:(?P\w+)}", cmd_str) + ] + + only_aliases = all(not item.isnumeric() for item in indexes_or_aliases) + if only_aliases: + used_params = [ + param + for param in command.params + if param.alias in indexes_or_aliases + ] + command.params = used_params + + +def load_applications(config: ExtendedApplicationConfig) -> list[Application]: + """Load application. + + Application configuration could contain different parameters/commands for different + supported systems. For each supported system this function will return separate + Application instance with appropriate configuration. 
+ """ + configs = load_application_configs(config, ApplicationConfig) + applications = [Application(cfg) for cfg in configs] + for application in applications: + application.remove_unused_params() + return applications diff --git a/src/mlia/backend/executor/common.py b/src/mlia/backend/executor/common.py new file mode 100644 index 0000000..48dbd4a --- /dev/null +++ b/src/mlia/backend/executor/common.py @@ -0,0 +1,517 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Contain all common functions for the backends.""" +from __future__ import annotations + +import json +import logging +import re +from abc import ABC +from collections import Counter +from pathlib import Path +from typing import Any +from typing import Callable +from typing import cast +from typing import Final +from typing import IO +from typing import Iterable +from typing import Match +from typing import NamedTuple +from typing import Pattern + +from mlia.backend.executor.config import BackendConfig +from mlia.backend.executor.config import BaseBackendConfig +from mlia.backend.executor.config import NamedExecutionConfig +from mlia.backend.executor.config import UserParamConfig +from mlia.backend.executor.config import UserParamsConfig +from mlia.backend.executor.fs import get_backends_path +from mlia.backend.executor.fs import remove_resource +from mlia.backend.executor.fs import ResourceType + + +BACKEND_CONFIG_FILE: Final[str] = "backend-config.json" + + +class ConfigurationException(Exception): + """Configuration exception.""" + + +def get_backend_config(dir_path: Path) -> Path: + """Get path to backend configuration file.""" + return dir_path / BACKEND_CONFIG_FILE + + +def get_backend_configs(resource_type: ResourceType) -> Iterable[Path]: + """Get path to the backend configs for provided resource_type.""" + return ( + get_backend_config(entry) for entry in get_backend_directories(resource_type) + ) + + +def
# --- src/mlia/backend/executor/common.py (continued) ---


def get_backend_directories(resource_type: ResourceType) -> Iterable[Path]:
    """Get path to the backend directories for provided resource_type."""
    return (
        entry
        for entry in get_backends_path(resource_type).iterdir()
        if is_backend_directory(entry)
    )


def is_backend_directory(dir_path: Path) -> bool:
    """Check if path is backend's configuration directory."""
    return dir_path.is_dir() and get_backend_config(dir_path).is_file()


def remove_backend(directory_name: str, resource_type: ResourceType) -> None:
    """Remove backend with provided type and directory_name."""
    if not directory_name:
        raise Exception("No directory name provided")

    remove_resource(directory_name, resource_type)


def load_config(config: Path | IO[bytes] | None) -> BackendConfig:
    """Return a loaded json file."""
    if config is None:
        raise Exception("Unable to read config")

    if isinstance(config, Path):
        with config.open() as json_file:
            return cast(BackendConfig, json.load(json_file))

    return cast(BackendConfig, json.load(config))


def parse_raw_parameter(parameter: str) -> tuple[str, str | None]:
    """Split the parameter string in name and optional value.

    It manages the following cases:
    --param=1 -> --param, 1
    --param 1 -> --param, 1
    --flag    -> --flag, None
    """
    data = re.split(" |=", parameter)
    if len(data) == 1:
        param_name = data[0]
        param_value = None
    else:
        # Everything up to the last token is the (possibly multi-word) name;
        # the last token is the value.
        param_name = " ".join(data[0:-1])
        param_value = data[-1]
    return param_name, param_value


class DataPaths(NamedTuple):
    """Pair of source path and destination name for data to be copied."""

    src: Path
    dst: str


class Backend(ABC):
    """Backend class."""

    # pylint: disable=too-many-instance-attributes

    def __init__(self, config: BaseBackendConfig):
        """Initialize backend from its configuration dictionary.

        Raises ConfigurationException when the mandatory name is missing.
        """
        name = config.get("name")
        if not name:
            raise ConfigurationException("Name is empty")

        self.name = name
        self.description = config.get("description", "")
        self.config_location = config.get("config_location")
        self.variables = config.get("variables", {})
        self.annotations = config.get("annotations", {})

        self._parse_commands_and_params(config)

    def validate_parameter(self, command_name: str, parameter: str) -> bool:
        """Validate the parameter string against the application configuration.

        We take the parameter string, extract the parameter name/value and
        check them against the current configuration.
        """
        param_name, param_value = parse_raw_parameter(parameter)
        valid_param_name = valid_param_value = False

        command = self.commands.get(command_name)
        if not command:
            raise AttributeError(f"Unknown command: '{command_name}'")

        # Iterate over all available parameters until we have a match.
        for param in command.params:
            if self._same_parameter(param_name, param):
                valid_param_name = True
                # This is a non-empty list
                if param.values:
                    # We check if the value is allowed in the configuration
                    valid_param_value = param_value in param.values
                else:
                    # In this case we don't validate the value and accept
                    # whatever we have set.
                    valid_param_value = True
                break

        return valid_param_name and valid_param_value

    def __eq__(self, other: object) -> bool:
        """Overload operator ==."""
        if not isinstance(other, Backend):
            return False

        return (
            self.name == other.name
            and self.description == other.description
            and self.commands == other.commands
        )

    def __repr__(self) -> str:
        """Represent the Backend instance by its name."""
        return self.name

    def _parse_commands_and_params(self, config: BaseBackendConfig) -> None:
        """Parse commands and user parameters."""
        self.commands: dict[str, Command] = {}

        commands = config.get("commands")
        if commands:
            params = config.get("user_params")

            for command_name in commands.keys():
                command_params = self._parse_params(params, command_name)
                command_strings = [
                    self._substitute_variables(cmd)
                    for cmd in commands.get(command_name, [])
                ]
                self.commands[command_name] = Command(command_strings, command_params)

    def _substitute_variables(self, str_val: str) -> str:
        """Substitute variables in string.

        Variables is being substituted at backend's creation stage because
        they could contain references to other params which will be
        resolved later.
        """
        if not str_val:
            return str_val

        # NOTE: named group restored — the patch text had lost the
        # "<var_name>" part, which is invalid regex syntax.
        var_pattern: Final[Pattern] = re.compile(r"{variables:(?P<var_name>\w+)}")

        def var_value(match: Match) -> str:
            var_name = match["var_name"]
            if var_name not in self.variables:
                raise ConfigurationException(f"Unknown variable {var_name}")

            return self.variables[var_name]

        return var_pattern.sub(var_value, str_val)

    @classmethod
    def _parse_params(
        cls, params: UserParamsConfig | None, command: str
    ) -> list[Param]:
        """Parse user parameters declared for the given command."""
        if not params:
            return []

        return [cls._parse_param(p) for p in params.get(command, [])]

    @classmethod
    def _parse_param(cls, param: UserParamConfig) -> Param:
        """Parse a single parameter."""
        name = param.get("name")
        if name is not None and not name:
            raise ConfigurationException("Parameter has an empty 'name' attribute.")
        values = param.get("values", None)
        default_value = param.get("default_value", None)
        description = param.get("description", "")
        alias = param.get("alias")

        return Param(
            name=name,
            description=description,
            values=values,
            default_value=default_value,
            alias=alias,
        )

    def _get_command_details(self) -> dict:
        """Return details of every command, keyed by command name."""
        command_details = {
            command_name: command.get_details()
            for command_name, command in self.commands.items()
        }
        return command_details

    def _get_user_param_value(self, user_params: list[str], param: Param) -> str | None:
        """Get the user-specified value of a parameter."""
        for user_param in user_params:
            user_param_name, user_param_value = parse_raw_parameter(user_param)
            if user_param_name == param.name:
                warn_message = (
                    "The direct use of parameter name is deprecated"
                    " and might be removed in the future.\n"
                    f"Please use alias '{param.alias}' instead of "
                    # f-prefix added so the parameter name actually
                    # interpolates (it was a plain string literal before).
                    f"'{user_param_name}' to provide the parameter."
                )
                logging.warning(warn_message)

            if self._same_parameter(user_param_name, param):
                return user_param_value

        return None

    @staticmethod
    def _same_parameter(user_param_name_or_alias: str, param: Param) -> bool:
        """Compare user parameter name with param name or alias."""
        # Strip the "=" sign in the param_name. This is needed just for
        # comparison with the parameters passed by the user.
        # The equal sign needs to be honoured when re-building the
        # parameter back.
        param_name = None if not param.name else param.name.rstrip("=")
        return user_param_name_or_alias in [param_name, param.alias]

    def resolved_parameters(
        self, command_name: str, user_params: list[str]
    ) -> list[tuple[str | None, Param]]:
        """Return list of parameters with values."""
        result: list[tuple[str | None, Param]] = []
        command = self.commands.get(command_name)
        if not command:
            return result

        for param in command.params:
            value = self._get_user_param_value(user_params, param)
            if not value:
                value = param.default_value
            result.append((value, param))

        return result

    def build_command(
        self,
        command_name: str,
        user_params: list[str],
        param_resolver: Callable[[str, str, list[tuple[str | None, Param]]], str],
    ) -> list[str]:
        """
        Return a list of executable command strings.

        Given a command and associated parameters, returns a list of executable
        command strings.
        """
        command = self.commands.get(command_name)
        if not command:
            raise ConfigurationException(
                f"Command '{command_name}' could not be found."
            )

        commands_to_run = []

        params_values = self.resolved_parameters(command_name, user_params)
        for cmd_str in command.command_strings:
            cmd_str = resolve_all_parameters(
                cmd_str, param_resolver, command_name, params_values
            )
            commands_to_run.append(cmd_str)

        return commands_to_run


class Param:
    """Class for representing a generic application parameter."""

    def __init__(  # pylint: disable=too-many-arguments
        self,
        name: str | None,
        description: str,
        values: list[str] | None = None,
        default_value: str | None = None,
        alias: str | None = None,
    ) -> None:
        """Construct a Param instance.

        Raises ConfigurationException if neither name nor alias is given.
        """
        if not name and not alias:
            raise ConfigurationException(
                "Either name, alias or both must be set to identify a parameter."
            )
        self.name = name
        self.values = values
        self.description = description
        self.default_value = default_value
        self.alias = alias

    def get_details(self) -> dict:
        """Return a dictionary with all relevant information of a Param."""
        return {key: value for key, value in self.__dict__.items() if value}

    def __eq__(self, other: object) -> bool:
        """Overload operator == (alias is deliberately not compared)."""
        if not isinstance(other, Param):
            return False

        return (
            self.name == other.name
            and self.values == other.values
            and self.default_value == other.default_value
            and self.description == other.description
        )


class Command:
    """Class for representing a command."""

    def __init__(
        self, command_strings: list[str], params: list[Param] | None = None
    ) -> None:
        """Construct a Command instance."""
        self.command_strings = command_strings

        if params:
            self.params = params
        else:
            self.params = []

        self._validate()

    def _validate(self) -> None:
        """Validate command.

        Aliases must be unique, and a parameter name must not collide with
        another parameter's alias.
        """
        if not self.params:
            return

        aliases = [param.alias for param in self.params if param.alias is not None]
        repeated_aliases = [
            alias for alias, count in Counter(aliases).items() if count > 1
        ]

        if repeated_aliases:
            raise ConfigurationException(
                f"Non-unique aliases {', '.join(repeated_aliases)}"
            )

        both_name_and_alias = [
            param.name
            for param in self.params
            if param.name in aliases and param.name != param.alias
        ]
        if both_name_and_alias:
            raise ConfigurationException(
                f"Aliases {', '.join(both_name_and_alias)} could not be used "
                "as parameter name."
            )

    def get_details(self) -> dict:
        """Return a dictionary with all relevant information of a Command."""
        output = {
            "command_strings": self.command_strings,
            "user_params": [param.get_details() for param in self.params],
        }
        return output

    def __eq__(self, other: object) -> bool:
        """Overload operator ==."""
        if not isinstance(other, Command):
            return False

        return (
            self.command_strings == other.command_strings
            and self.params == other.params
        )


def resolve_all_parameters(
    str_val: str,
    param_resolver: Callable[[str, str, list[tuple[str | None, Param]]], str],
    command_name: str | None = None,
    params_values: list[tuple[str | None, Param]] | None = None,
) -> str:
    """Resolve all parameters in the string."""
    if not str_val:
        return str_val

    # NOTE: named group restored — the patch text had lost the
    # "<param_name>" part, which is invalid regex syntax.
    param_pattern: Final[Pattern] = re.compile(r"{(?P<param_name>[\w.:]+)}")
    # Keep substituting until no placeholders remain (resolved values may
    # themselves contain placeholders).
    while param_pattern.findall(str_val):
        str_val = param_pattern.sub(
            lambda m: param_resolver(
                m["param_name"], command_name or "", params_values or []
            ),
            str_val,
        )
    return str_val


def load_application_configs(
    config: Any,
    config_type: type[Any],
    is_system_required: bool = True,
) -> Any:
    """Get one config for each system supported by the application.

    The configuration could contain different parameters/commands for different
    supported systems. For each supported system this function will return separate
    config with appropriate configuration.
    """
    merged_configs = []
    supported_systems: list[NamedExecutionConfig] | None = config.get(
        "supported_systems"
    )
    if not supported_systems:
        if is_system_required:
            raise ConfigurationException("No supported systems definition provided")
        # Create an empty system to be used in the parsing below
        supported_systems = [cast(NamedExecutionConfig, {})]

    default_user_params = config.get("user_params", {})

    def merge_config(system: NamedExecutionConfig) -> Any:
        """Build one per-system config by merging defaults with overrides."""
        system_name = system.get("name")
        if not system_name and is_system_required:
            raise ConfigurationException(
                "Unable to read supported system definition, name is missed"
            )

        merged_config = config_type(**config)
        merged_config["supported_systems"] = [system_name] if system_name else []
        # merge default configuration and specific to the system
        merged_config["commands"] = {
            **config.get("commands", {}),
            **system.get("commands", {}),
        }

        params = {}
        tool_user_params = system.get("user_params", {})
        command_names = tool_user_params.keys() | default_user_params.keys()
        for command_name in command_names:
            if command_name not in merged_config["commands"]:
                continue

            params_default = default_user_params.get(command_name, [])
            params_tool = tool_user_params.get(command_name, [])
            if not params_default or not params_tool:
                params[command_name] = params_tool or params_default
            if params_default and params_tool:
                # Merging by alias requires every parameter on both sides
                # to carry one.
                if any(not p.get("alias") for p in params_default):
                    raise ConfigurationException(
                        f"Default parameters for command {command_name} "
                        "should have aliases"
                    )
                if any(not p.get("alias") for p in params_tool):
                    raise ConfigurationException(
                        f"{system_name} parameters for command {command_name} "
                        "should have aliases."
                    )

                merged_by_alias = {
                    **{p.get("alias"): p for p in params_default},
                    **{p.get("alias"): p for p in params_tool},
                }
                params[command_name] = list(merged_by_alias.values())

        merged_config["user_params"] = params
        merged_config["variables"] = {
            **config.get("variables", {}),
            **system.get("variables", {}),
        }
        return merged_config

    merged_configs = [merge_config(system) for system in supported_systems]

    return merged_configs


# --- src/mlia/backend/executor/config.py ---
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Contain definition of backend configuration."""
# (from __future__ import annotations — at the top of config.py in the
#  original file)

from pathlib import Path
from typing import Dict
from typing import List
from typing import TypedDict
from typing import Union


class UserParamConfig(TypedDict, total=False):
    """User parameter configuration."""

    name: str | None
    default_value: str
    values: list[str]
    description: str
    alias: str


UserParamsConfig = Dict[str, List[UserParamConfig]]


class ExecutionConfig(TypedDict, total=False):
    """Execution configuration."""

    commands: dict[str, list[str]]
    user_params: UserParamsConfig
    variables: dict[str, str]


class NamedExecutionConfig(ExecutionConfig):
    """Execution configuration with name."""

    name: str


class BaseBackendConfig(ExecutionConfig, total=False):
    """Base backend configuration."""

    name: str
    description: str
    config_location: Path
    annotations: dict[str, str | list[str]]


class ApplicationConfig(BaseBackendConfig, total=False):
    """Application configuration."""

    supported_systems: list[str]


class ExtendedApplicationConfig(BaseBackendConfig, total=False):
    """Extended application configuration."""

    supported_systems: list[NamedExecutionConfig]
# --- src/mlia/backend/executor/config.py (continued) ---


class SystemConfig(BaseBackendConfig, total=False):
    """System configuration."""

    reporting: dict[str, dict]


BackendItemConfig = Union[ApplicationConfig, SystemConfig]
BackendConfig = Union[List[ExtendedApplicationConfig], List[SystemConfig]]


# --- src/mlia/backend/executor/execution.py ---
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Application execution module."""
# (from __future__ import annotations — at the top of execution.py in the
#  original file)

import logging
import re
from typing import cast

from mlia.backend.executor.application import Application
from mlia.backend.executor.application import get_application
from mlia.backend.executor.common import Backend
from mlia.backend.executor.common import ConfigurationException
from mlia.backend.executor.common import Param
from mlia.backend.executor.system import get_system
from mlia.backend.executor.system import System

logger = logging.getLogger(__name__)


class AnotherInstanceIsRunningException(Exception):
    """Concurrent execution error."""


class ExecutionContext:  # pylint: disable=too-few-public-methods
    """Command execution context."""

    def __init__(
        self,
        app: Application,
        app_params: list[str],
        system: System,
        system_params: list[str],
    ):
        """Init execution context."""
        self.app = app
        self.app_params = app_params
        self.system = system
        self.system_params = system_params

        self.param_resolver = ParamResolver(self)

        # Captured output of the last run; filled in by run_application.
        self.stdout: bytearray | None = None
        self.stderr: bytearray | None = None


class ParamResolver:
    """Parameter resolver."""

    def __init__(self, context: ExecutionContext):
        """Init parameter resolver."""
        self.ctx = context

    @staticmethod
    def resolve_user_params(
        cmd_name: str | None,
        index_or_alias: str,
        resolved_params: list[tuple[str | None, Param]] | None,
    ) -> str:
        """Resolve user params."""
        if not cmd_name or resolved_params is None:
            raise ConfigurationException("Unable to resolve user params")

        param_value: str | None = None
        param: Param | None = None

        if index_or_alias.isnumeric():
            i = int(index_or_alias)
            if i not in range(len(resolved_params)):
                raise ConfigurationException(
                    f"Invalid index {i} for user params of command {cmd_name}"
                )
            param_value, param = resolved_params[i]
        else:
            for val, par in resolved_params:
                if par.alias == index_or_alias:
                    param_value, param = val, par
                    break

        if param is None:
            raise ConfigurationException(
                f"No user parameter for command '{cmd_name}' with "
                f"alias '{index_or_alias}'."
            )

        if param_value:
            # We need to handle two cases of parameters here:
            # 1) Optional parameters (non-positional with a name and value)
            # 2) Positional parameters (value only, no name needed)
            # Default to empty strings for positional arguments
            param_name = ""
            separator = ""
            if param.name is not None:
                # A valid param name means we have an optional/non-positional
                # argument: the separator is an empty string in case the
                # param_name has an equal sign as we have to honour it.
                # If the parameter doesn't end with an equal sign then a
                # space character is injected to split the parameter name
                # and its value.
                param_name = param.name
                separator = "" if param.name.endswith("=") else " "

            return f"{param_name}{separator}{param_value}"

        if param.name is None:
            raise ConfigurationException(
                f"Missing user parameter with alias '{index_or_alias}' for "
                f"command '{cmd_name}'."
            )

        return param.name  # flag: just return the parameter name

    def resolve_commands_and_params(
        self, backend_type: str, cmd_name: str, return_params: bool, index_or_alias: str
    ) -> str:
        """Resolve command or command's param value."""
        if backend_type == "system":
            backend = cast(Backend, self.ctx.system)
            backend_params = self.ctx.system_params
        else:  # Application backend
            backend = cast(Backend, self.ctx.app)
            backend_params = self.ctx.app_params

        if cmd_name not in backend.commands:
            raise ConfigurationException(f"Command {cmd_name} not found")

        if return_params:
            params = backend.resolved_parameters(cmd_name, backend_params)
            if index_or_alias.isnumeric():
                i = int(index_or_alias)
                if i not in range(len(params)):
                    raise ConfigurationException(
                        f"Invalid parameter index {i} for command {cmd_name}"
                    )

                param_value = params[i][0]
            else:
                param_value = None
                for value, param in params:
                    if param.alias == index_or_alias:
                        param_value = value
                        break

            if not param_value:
                raise ConfigurationException(
                    "No value for parameter with index or "
                    f"alias {index_or_alias} of command {cmd_name}."
                )
            return param_value

        if not index_or_alias.isnumeric():
            raise ConfigurationException(f"Bad command index {index_or_alias}")

        i = int(index_or_alias)
        commands = backend.build_command(cmd_name, backend_params, self.param_resolver)
        if i not in range(len(commands)):
            raise ConfigurationException(f"Invalid index {i} for command {cmd_name}")

        return commands[i]

    def resolve_variables(self, backend_type: str, var_name: str) -> str:
        """Resolve variable value."""
        if backend_type == "system":
            backend = cast(Backend, self.ctx.system)
        else:  # Application backend
            backend = cast(Backend, self.ctx.app)

        if var_name not in backend.variables:
            raise ConfigurationException(f"Unknown variable {var_name}")

        return backend.variables[var_name]

    def param_matcher(
        self,
        param_name: str,
        cmd_name: str | None,
        resolved_params: list[tuple[str | None, Param]] | None,
    ) -> str:
        """Regexp to resolve a param from the param_name."""
        # this pattern supports parameter names like
        # "application.commands.run:0" and "system.commands.run.params:0"
        # Note: 'software' is included for backward compatibility.
        # NOTE: regex named groups restored — the patch text had lost the
        # "<name>" parts, which is invalid regex syntax.
        commands_and_params_match = re.match(
            r"(?P<type>application|software|system)[.]commands[.]"
            r"(?P<name>\w+)"
            r"(?P<params>[.]params|)[:]"
            r"(?P<index_or_alias>\w+)",
            param_name,
        )

        if commands_and_params_match:
            backend_type, cmd_name, return_params, index_or_alias = (
                commands_and_params_match["type"],
                commands_and_params_match["name"],
                commands_and_params_match["params"],
                commands_and_params_match["index_or_alias"],
            )
            return self.resolve_commands_and_params(
                backend_type, cmd_name, bool(return_params), index_or_alias
            )

        # Note: 'software' is included for backward compatibility.
        variables_match = re.match(
            r"(?P<type>application|software|system)[.]variables:(?P<var_name>\w+)",
            param_name,
        )
        if variables_match:
            backend_type, var_name = (
                variables_match["type"],
                variables_match["var_name"],
            )
            return self.resolve_variables(backend_type, var_name)

        user_params_match = re.match(
            r"user_params:(?P<index_or_alias>\w+)", param_name
        )
        if user_params_match:
            index_or_alias = user_params_match["index_or_alias"]
            return self.resolve_user_params(cmd_name, index_or_alias, resolved_params)

        raise ConfigurationException(f"Unable to resolve parameter {param_name}")

    def param_resolver(
        self,
        param_name: str,
        cmd_name: str | None = None,
        resolved_params: list[tuple[str | None, Param]] | None = None,
    ) -> str:
        """Resolve parameter value based on current execution context."""
        # Note: 'software.*' is included for backward compatibility.
        resolved_param = None
        if param_name in ["application.name", "software.name"]:
            resolved_param = self.ctx.app.name
        elif param_name in ["application.description", "software.description"]:
            resolved_param = self.ctx.app.description
        elif self.ctx.app.config_location and (
            param_name in ["application.config_dir", "software.config_dir"]
        ):
            resolved_param = str(self.ctx.app.config_location.absolute())
        elif self.ctx.system is not None:
            if param_name == "system.name":
                resolved_param = self.ctx.system.name
            elif param_name == "system.description":
                resolved_param = self.ctx.system.description
            elif param_name == "system.config_dir" and self.ctx.system.config_location:
                resolved_param = str(self.ctx.system.config_location.absolute())

        if not resolved_param:
            resolved_param = self.param_matcher(param_name, cmd_name, resolved_params)
        return resolved_param

    def __call__(
        self,
        param_name: str,
        cmd_name: str | None = None,
        resolved_params: list[tuple[str | None, Param]] | None = None,
    ) -> str:
        """Resolve provided parameter."""
        return self.param_resolver(param_name, cmd_name, resolved_params)


def validate_parameters(
    backend: Backend, command_names: list[str], params: list[str]
) -> None:
    """Check parameters passed to backend."""
    for param in params:
        acceptable = any(
            backend.validate_parameter(command_name, param)
            for command_name in command_names
            if command_name in backend.commands
        )

        if not acceptable:
            backend_type = "System" if isinstance(backend, System) else "Application"
            raise ValueError(
                f"{backend_type} parameter '{param}' not valid for "
                f"command '{' or '.join(command_names)}'."
            )


def get_application_by_name_and_system(
    application_name: str, system_name: str
) -> Application:
    """Get application."""
    applications = get_application(application_name, system_name)
    if not applications:
        raise ValueError(
            f"Application '{application_name}' doesn't support the "
            f"system '{system_name}'."
        )

    if len(applications) != 1:
        raise ValueError(
            f"Error during getting application {application_name} for the "
            f"system {system_name}."
        )

    return applications[0]


def get_application_and_system(
    application_name: str, system_name: str
) -> tuple[Application, System]:
    """Return application and system by provided names."""
    system = get_system(system_name)
    if not system:
        raise ValueError(f"System {system_name} is not found.")

    application = get_application_by_name_and_system(application_name, system_name)

    return application, system


def run_application(
    application_name: str,
    application_params: list[str],
    system_name: str,
    system_params: list[str],
) -> ExecutionContext:
    """Run application on the provided system."""
    application, system = get_application_and_system(application_name, system_name)
    validate_parameters(application, ["run"], application_params)
    validate_parameters(system, ["run"], system_params)

    ctx = ExecutionContext(
        app=application,
        app_params=application_params,
        system=system,
        system_params=system_params,
    )

    logger.debug("Generating commands to execute")
    commands_to_run = ctx.system.build_command(
        "run", ctx.system_params, ctx.param_resolver
    )

    for command in commands_to_run:
        logger.debug("Running: %s", command)
        exit_code, ctx.stdout, ctx.stderr = ctx.system.run(command)

        if exit_code != 0:
            logger.warning("Application exited with exit code %i", exit_code)

    return ctx
# --- src/mlia/backend/executor/fs.py ---
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Module to host all file system related functions."""
# (from __future__ import annotations — at the top of fs.py in the
#  original file)

import re
import shutil
from pathlib import Path
from typing import Literal

from mlia.utils.filesystem import get_mlia_resources

ResourceType = Literal["applications", "systems"]


def get_backend_resources() -> Path:
    """Get backend resources folder path."""
    return get_mlia_resources() / "backends"


def get_backends_path(name: ResourceType) -> Path:
    """Return the absolute path of the specified resource.

    It uses importlib to return resources packaged with MANIFEST.in.
    """
    if not name:
        raise ResourceWarning("Resource name is not provided")

    resource_path = get_backend_resources() / name
    if resource_path.is_dir():
        return resource_path

    raise ResourceWarning(f"Resource '{name}' not found.")


def copy_directory_content(source: Path, destination: Path) -> None:
    """Copy content of the source directory into destination directory."""
    for item in source.iterdir():
        src = source / item.name
        dest = destination / item.name

        if src.is_dir():
            shutil.copytree(src, dest)
        else:
            shutil.copy2(src, dest)


def remove_resource(resource_directory: str, resource_type: ResourceType) -> None:
    """Remove resource data."""
    resources = get_backends_path(resource_type)

    resource_location = resources / resource_directory
    if not resource_location.exists():
        raise Exception(f"Resource {resource_directory} does not exist")

    if not resource_location.is_dir():
        raise Exception(f"Wrong resource {resource_directory}")

    shutil.rmtree(resource_location)


def remove_directory(directory_path: Path | None) -> None:
    """Remove directory."""
    if not directory_path or not directory_path.is_dir():
        raise Exception("No directory path provided")

    shutil.rmtree(directory_path)


def recreate_directory(directory_path: Path | None) -> None:
    """Recreate directory."""
    if not directory_path:
        raise Exception("No directory path provided")

    if directory_path.exists() and not directory_path.is_dir():
        raise Exception(
            f"Path {str(directory_path)} does exist and it is not a directory."
        )

    if directory_path.is_dir():
        remove_directory(directory_path)

    directory_path.mkdir()


def valid_for_filename(value: str, replacement: str = "") -> str:
    """Replace non alpha numeric characters."""
    return re.sub(r"[^\w.]", replacement, value, flags=re.ASCII)


# --- src/mlia/backend/executor/output_consumer.py ---
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Output consumers module."""
# (from __future__ import annotations — at the top of output_consumer.py in
#  the original file)

import base64
import json
from typing import Protocol
from typing import runtime_checkable


@runtime_checkable
class OutputConsumer(Protocol):
    """Protocol to consume output."""

    def feed(self, line: str) -> bool:
        """
        Feed a new line to be parsed.

        Return True if the line should be removed from the output.
        """


class Base64OutputConsumer(OutputConsumer):
    """
    Parser to extract base64-encoded JSON from tagged standard output.

    Example of the tagged output:
    ```
    # Encoded JSON: {"test": 1234}
    <metrics>eyJ0ZXN0IjogMTIzNH0=</metrics>
    ```
    """

    TAG_NAME = "metrics"

    def __init__(self) -> None:
        """Set up the regular expression to extract tagged strings."""
        # NOTE: closing tag restored — the "</metrics>" part of the pattern
        # had been stripped from the patch text.
        self._regex = re.compile(rf"<{self.TAG_NAME}>(.*)</{self.TAG_NAME}>")
        self.parsed_output: list = []

    def feed(self, line: str) -> bool:
        """
        Parse the output line and save the decoded output.

        Returns True if the line contains tagged output.

        Example:
        Using the tagged output from the class docs the parser should collect
        the following:
        ```
        [
            {"test": 1234}
        ]
        ```
        """
        res_b64 = self._regex.search(line)
        if res_b64:
            res_json = base64.b64decode(res_b64.group(1), validate=True)
            res = json.loads(res_json)
            self.parsed_output.append(res)
            # Remove this line from the output, i.e. consume it, as it
            # does not contain any human readable content.
            return True

        return False


# --- src/mlia/backend/executor/proc.py ---
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Processes module.

This module contains all classes and functions for dealing with Linux
processes.
"""
# (from __future__ import annotations — at the top of proc.py in the
#  original file)

import datetime
import logging
import shlex
import signal
import tempfile
import time
from typing import Any

from sh import Command
from sh import CommandNotFound
from sh import ErrorReturnCode
from sh import RunningCommand

from mlia.backend.executor.fs import valid_for_filename

logger = logging.getLogger(__name__)


class CommandFailedException(Exception):
    """Exception for failed command execution."""


class ShellCommand:
    """Wrapper class for shell commands."""

    def run(
        self,
        cmd: str,
        *args: str,
        _cwd: Path | None = None,
        _tee: bool = True,
        _bg: bool = True,
        _out: Any = None,
        _err: Any = None,
        _search_paths: list[Path] | None = None,
    ) -> RunningCommand:
        """Run the shell command with the given arguments.

        There are special arguments that modify the behaviour of the process.
        _cwd: current working directory
        _tee: it redirects the stdout both to console and file
        _bg: if True, it runs the process in background and the command is not
            blocking.
        _out: use this object for stdout redirect,
        _err: use this object for stderr redirect,
        _search_paths: If presented used for searching executable
        """
        try:
            kwargs = {}
            if _cwd:
                kwargs["_cwd"] = str(_cwd)
            command = Command(cmd, _search_paths).bake(args, **kwargs)
        except CommandNotFound as error:
            logging.error("Command '%s' not found", error.args[0])
            raise error

        out, err = _out, _err
        if not _out and not _err:
            # Default to per-invocation temp files for stdout/stderr.
            out, err = (str(item) for item in self.get_stdout_stderr_paths(cmd))

        return command(_out=out, _err=err, _tee=_tee, _bg=_bg, _bg_exc=False)

    @classmethod
    def get_stdout_stderr_paths(cls, cmd: str) -> tuple[Path, Path]:
        """Construct and returns the paths of stdout/stderr files."""
        timestamp = datetime.datetime.now().timestamp()
        base_path = Path(tempfile.mkdtemp(prefix="mlia-", suffix=f"{timestamp}"))
        base = base_path / f"{valid_for_filename(cmd, '_')}_{timestamp}"
        stdout = base.with_suffix(".out")
        stderr = base.with_suffix(".err")
        try:
            stdout.touch()
            stderr.touch()
        except FileNotFoundError as error:
            logging.error("File not found: %s", error.filename)
            raise error
        return stdout, stderr


def parse_command(command: str, shell: str = "bash") -> list[str]:
    """Parse command."""
    cmd, *args = shlex.split(command, posix=True)

    if is_shell_script(cmd):
        # Shell scripts are run through the shell interpreter.
        args = [cmd] + args
        cmd = shell

    return [cmd] + args


def execute_command(  # pylint: disable=invalid-name
    command: str,
    cwd: Path,
    bg: bool = False,
    shell: str = "bash",
    out: Any = None,
    err: Any = None,
) -> RunningCommand:
    """Execute shell command."""
    cmd, *args = parse_command(command, shell)

    search_paths = None
    if cmd != shell and (cwd / cmd).is_file():
        search_paths = [cwd]

    return ShellCommand().run(
        cmd, *args, _cwd=cwd, _bg=bg, _search_paths=search_paths, _out=out, _err=err
    )


def is_shell_script(cmd: str) -> bool:
    """Check if command is shell script."""
    return cmd.endswith(".sh")


def run_and_wait(
    command: str,
    cwd: Path,
    terminate_on_error: bool = False,
    out: Any = None,
    err: Any = None,
) -> tuple[int, bytearray, bytearray]:
    """
    Run command and wait while it is executing.

    Returns a tuple: (exit_code, stdout, stderr)
    """
    running_cmd: RunningCommand | None = None
    try:
        running_cmd = execute_command(command, cwd, bg=True, out=out, err=err)
        return running_cmd.exit_code, running_cmd.stdout, running_cmd.stderr
    except ErrorReturnCode as cmd_failed:
        raise CommandFailedException() from cmd_failed
    except Exception as error:
        is_running = running_cmd is not None and running_cmd.is_alive()
        if terminate_on_error and is_running:
            logger.debug("Terminating ...")
            terminate_command(running_cmd)

        raise error


def terminate_command(
    running_cmd: RunningCommand,
    wait: bool = True,
    wait_period: float = 0.5,
    number_of_attempts: int = 20,
) -> None:
    """Terminate running command.

    Sends SIGINT to the process group and, if the process is still alive
    after the wait period, escalates to SIGTERM.
    """
    try:
        running_cmd.process.signal_group(signal.SIGINT)
        if wait:
            for _ in range(number_of_attempts):
                time.sleep(wait_period)
                if not running_cmd.is_alive():
                    return
            logger.error(
                "Unable to terminate process %i. Sending SIGTERM...",
                running_cmd.process.pid,
            )
            running_cmd.process.signal_group(signal.SIGTERM)
    except ProcessLookupError:
        # Process already gone — nothing to do.
        pass


def print_command_stdout(command: RunningCommand) -> None:
    """Print the stdout of a command.

    The command has 2 states: running and done.
    If the command is running, the output is taken by the running process.
    If the command has ended its execution, the stdout is taken from stdout
    property.
    """
    if command.is_alive():
        while True:
            try:
                print(command.next(), end="")
            except StopIteration:
                break
    else:
        print(command.stdout)
@dataclass
class ExecutionParams:
    """Parameters describing a single application run on a system."""

    application: str
    system: str
    application_params: list[str]
    system_params: list[str]


class BackendRunner:
    """Thin facade over the backend executor install/run helpers."""

    def __init__(self) -> None:
        """Init BackendRunner instance."""

    @staticmethod
    def get_installed_systems() -> list[str]:
        """Return the names of all installed systems."""
        return [installed.name for installed in get_available_systems()]

    @staticmethod
    def get_installed_applications(system: str | None = None) -> list[str]:
        """Return installed application names, optionally filtered by system."""
        applications = get_available_applications()
        if system is None:
            return [app.name for app in applications]
        return [app.name for app in applications if app.can_run_on(system)]

    def is_application_installed(self, application: str, system: str) -> bool:
        """Return True if the application is installed for the given system."""
        return application in self.get_installed_applications(system)

    def is_system_installed(self, system: str) -> bool:
        """Return True if the requested system is installed."""
        return system in self.get_installed_systems()

    def systems_installed(self, systems: list[str]) -> bool:
        """Return True when *systems* is non-empty and every entry is installed."""
        if not systems:
            return False

        available = self.get_installed_systems()
        return all(name in available for name in systems)

    def applications_installed(self, applications: list[str]) -> bool:
        """Return True when *applications* is non-empty and all are installed."""
        if not applications:
            return False

        available = self.get_installed_applications()
        return all(name in available for name in applications)

    def all_installed(self, systems: list[str], apps: list[str]) -> bool:
        """Return True when every listed system and application is installed."""
        return self.systems_installed(systems) and self.applications_installed(apps)

    @staticmethod
    def install_system(system_path: Path) -> None:
        """Install the system located at *system_path*."""
        install_system(system_path)

    @staticmethod
    def install_application(app_path: Path) -> None:
        """Install the application located at *app_path*."""
        install_application(app_path)

    @staticmethod
    def run_application(execution_params: ExecutionParams) -> ExecutionContext:
        """Run the requested application and return its execution context."""
        return run_application(
            execution_params.application,
            execution_params.application_params,
            execution_params.system,
            execution_params.system_params,
        )

    @staticmethod
    def _params(name: str, params: list[str]) -> list[str]:
        """Interleave *name* before each value: [name, p0, name, p1, ...]."""
        return [token for value in params for token in (name, value)]
class Source(ABC):
    """Abstract base for a backend installation source."""

    @abstractmethod
    def name(self) -> str | None:
        """Get source name."""

    @abstractmethod
    def config(self) -> BackendConfig | None:
        """Get configuration file content."""

    @abstractmethod
    def install_into(self, destination: Path) -> None:
        """Install source into destination directory."""

    @abstractmethod
    def create_destination(self) -> bool:
        """Return True if destination folder should be created before installation."""


class DirectorySource(Source):
    """Source backed by a plain directory."""

    def __init__(self, directory_path: Path) -> None:
        """Create the DirectorySource instance."""
        assert isinstance(directory_path, Path)
        self.directory_path = directory_path

    def name(self) -> str:
        """Return name of source (the directory name)."""
        return self.directory_path.name

    def config(self) -> BackendConfig | None:
        """Return configuration file content.

        Raises ConfigurationException when the directory does not contain
        a backend configuration file.
        """
        if not is_backend_directory(self.directory_path):
            raise ConfigurationException("No configuration file found")

        config_file = get_backend_config(self.directory_path)
        return load_config(config_file)

    def install_into(self, destination: Path) -> None:
        """Copy the directory content into *destination*."""
        if not destination.is_dir():
            raise ConfigurationException(f"Wrong destination {destination}.")

        if not self.directory_path.is_dir():
            raise ConfigurationException(
                f"Directory {self.directory_path} does not exist."
            )

        copy_directory_content(self.directory_path, destination)

    def create_destination(self) -> bool:
        """Return True if destination folder should be created before installation."""
        return True


class TarArchiveSource(Source):
    """Source backed by a gzip-compressed tar archive."""

    def __init__(self, archive_path: Path) -> None:
        """Create the TarArchiveSource class."""
        assert isinstance(archive_path, Path)
        self.archive_path = archive_path
        # Lazily populated by _read_archive_content().
        self._config: BackendConfig | None = None
        self._has_top_level_folder: bool | None = None
        self._name: str | None = None

    def _read_archive_content(self) -> None:
        """Read name, configuration and layout information from the archive.

        Safe to call when the archive does not exist: only the name
        (derived from the file name) is populated in that case.
        """
        # Source name is the archive file name with all suffixes removed.
        # BUG FIX: the previous implementation used str.rstrip(extensions),
        # which strips a *set of characters*, not a suffix, and could remove
        # legitimate trailing characters of the name (e.g. "argz.tar.gz"
        # produced "" instead of "argz").
        extensions = "".join(self.archive_path.suffixes)
        if extensions:
            self._name = self.archive_path.name[: -len(extensions)]
        else:
            self._name = self.archive_path.name

        if not self.archive_path.exists():
            return

        with self._open(self.archive_path) as archive:
            try:
                # Configuration file located at the archive root.
                config_entry = archive.getmember(BACKEND_CONFIG_FILE)
                self._has_top_level_folder = False
            except KeyError as error_no_config:
                try:
                    # Otherwise expect a single top level directory that
                    # contains the configuration file.
                    archive_entries = archive.getnames()
                    entries_common_prefix = os.path.commonprefix(archive_entries)
                    top_level_dir = entries_common_prefix.rstrip("/")

                    if not top_level_dir:
                        raise RuntimeError(
                            "Archive has no top level directory"
                        ) from error_no_config

                    config_path = f"{top_level_dir}/{BACKEND_CONFIG_FILE}"

                    config_entry = archive.getmember(config_path)
                    self._has_top_level_folder = True
                    self._name = top_level_dir
                except (KeyError, RuntimeError) as error_no_root_dir_or_config:
                    raise ConfigurationException(
                        "No configuration file found"
                    ) from error_no_root_dir_or_config

            content = archive.extractfile(config_entry)
            self._config = load_config(content)

    def config(self) -> BackendConfig | None:
        """Return configuration file content."""
        if self._config is None:
            self._read_archive_content()

        return self._config

    def name(self) -> str | None:
        """Return name of the source."""
        if self._name is None:
            self._read_archive_content()

        return self._name

    def create_destination(self) -> bool:
        """Return True if destination folder must be created before installation."""
        if self._has_top_level_folder is None:
            self._read_archive_content()

        return not self._has_top_level_folder

    def install_into(self, destination: Path) -> None:
        """Extract the archive into *destination*.

        NOTE(review): extractall() performs no member path sanitisation on
        Python < 3.12; archives are assumed to come from trusted sources.
        """
        if not destination.is_dir():
            raise ConfigurationException(f"Wrong destination {destination}.")

        with self._open(self.archive_path) as archive:
            archive.extractall(destination)

    def _open(self, archive_path: Path) -> TarFile:
        """Open archive file."""
        if not archive_path.is_file():
            raise ConfigurationException(f"File {archive_path} does not exist.")

        if archive_path.name.endswith("tar.gz") or archive_path.name.endswith("tgz"):
            mode = "r:gz"
        else:
            raise ConfigurationException(f"Unsupported archive type {archive_path}.")

        # The returned TarFile object can be used as a context manager (using
        # 'with') by the calling instance.
        return tarfile.open(  # pylint: disable=consider-using-with
            self.archive_path, mode=mode
        )


def get_source(source_path: Path) -> TarArchiveSource | DirectorySource:
    """Return appropriate source instance based on provided source path."""
    if source_path.is_file():
        return TarArchiveSource(source_path)

    if source_path.is_dir():
        return DirectorySource(source_path)

    raise ConfigurationException(f"Unable to read {source_path}.")


def create_destination_and_install(source: Source, resource_path: Path) -> None:
    """Create destination directory and install source.

    This function is used for actual installation of system/backend. New
    directory will be created inside :resource_path: if needed. If for example
    archive contains top level folder then no need to create new directory.
    """
    destination = resource_path
    create_destination = source.create_destination()

    if create_destination:
        name = source.name()
        if not name:
            raise ConfigurationException("Unable to get source name.")

        destination = resource_path / name
        destination.mkdir()
    try:
        source.install_into(destination)
    except Exception as error:
        # Clean up the directory we created so a failed installation does
        # not leave an empty/partial entry behind.
        if create_destination:
            shutil.rmtree(destination)
        raise error
def get_available_systems_directory_names() -> list[str]:
    """Return a list of directory names for all avialable systems."""
    return [entry.name for entry in get_backend_directories("systems")]


def get_available_systems() -> list[System]:
    """Return a list with all available systems, sorted by name."""
    available_systems: list[System] = []
    for config_json in get_backend_configs("systems"):
        config_entries = cast(List[SystemConfig], load_config(config_json))
        for config_entry in config_entries:
            # Remember where the configuration came from so the system can
            # locate its own resources later.
            config_entry["config_location"] = config_json.parent.absolute()
            available_systems.append(load_system(config_entry))

    return sorted(available_systems, key=lambda item: item.name)


def get_system(system_name: str) -> System:
    """Return a system instance with the same name passed as argument.

    Raises ConfigurationException when no such system is installed.
    """
    for candidate in get_available_systems():
        if candidate.name == system_name:
            return candidate

    raise ConfigurationException(f"System '{system_name}' not found.")


def install_system(source_path: Path) -> None:
    """Install new system."""
    try:
        source = get_source(source_path)
        config = cast(List[SystemConfig], source.config())
        systems_to_install = [load_system(entry) for entry in config]
    except Exception as error:
        raise ConfigurationException("Unable to read system definition") from error

    if not systems_to_install:
        raise ConfigurationException("No system definition found")

    available_systems = get_available_systems()
    already_installed = [
        system for system in systems_to_install if system in available_systems
    ]
    if already_installed:
        names = [system.name for system in already_installed]
        raise ConfigurationException(
            f"Systems [{','.join(names)}] are already installed."
        )

    create_destination_and_install(source, get_backends_path("systems"))


def remove_system(directory_name: str) -> None:
    """Remove system."""
    remove_backend(directory_name, "systems")


def load_system(config: SystemConfig) -> System:
    """Load system based on it's execution type."""
    populate_shared_params(config)
    return System(config)


def populate_shared_params(config: SystemConfig) -> None:
    """Populate command parameters with shared parameters.

    The "shared" user parameters are merged into each command's own
    parameters (command-specific entries override shared ones per alias);
    the "shared" key is removed afterwards.
    """
    user_params = config.get("user_params")
    if not user_params or "shared" not in user_params:
        return

    shared_user_params = user_params["shared"]
    if not shared_user_params:
        return

    if not all(param.get("alias") for param in shared_user_params):
        raise ConfigurationException("All shared parameters should have aliases")

    commands = config.get("commands", {})
    for cmd_name in ["run"]:
        if commands.get(cmd_name) is None:
            commands[cmd_name] = []

        cmd_user_params = user_params.get(cmd_name)
        if not cmd_user_params:
            # No command-specific parameters: use the shared ones as-is.
            cmd_user_params = shared_user_params
        else:
            if not all(param.get("alias") for param in cmd_user_params):
                raise ConfigurationException(
                    f"All parameters for command {cmd_name} should have aliases."
                )
            # Merge per alias; command-specific entries win over shared ones.
            merged_by_alias = {
                **{param.get("alias"): param for param in shared_user_params},
                **{param.get("alias"): param for param in cmd_user_params},
            }
            cmd_user_params = list(merged_by_alias.values())

        user_params[cmd_name] = cmd_user_params

    config["commands"] = commands
    del user_params["shared"]
- """ - if not name: - raise ResourceWarning("Resource name is not provided") - - resource_path = get_backend_resources() / name - if resource_path.is_dir(): - return resource_path - - raise ResourceWarning(f"Resource '{name}' not found.") - - -def copy_directory_content(source: Path, destination: Path) -> None: - """Copy content of the source directory into destination directory.""" - for item in source.iterdir(): - src = source / item.name - dest = destination / item.name - - if src.is_dir(): - shutil.copytree(src, dest) - else: - shutil.copy2(src, dest) - - -def remove_resource(resource_directory: str, resource_type: ResourceType) -> None: - """Remove resource data.""" - resources = get_backends_path(resource_type) - - resource_location = resources / resource_directory - if not resource_location.exists(): - raise Exception(f"Resource {resource_directory} does not exist") - - if not resource_location.is_dir(): - raise Exception(f"Wrong resource {resource_directory}") - - shutil.rmtree(resource_location) - - -def remove_directory(directory_path: Path | None) -> None: - """Remove directory.""" - if not directory_path or not directory_path.is_dir(): - raise Exception("No directory path provided") - - shutil.rmtree(directory_path) - - -def recreate_directory(directory_path: Path | None) -> None: - """Recreate directory.""" - if not directory_path: - raise Exception("No directory path provided") - - if directory_path.exists() and not directory_path.is_dir(): - raise Exception( - f"Path {str(directory_path)} does exist and it is not a directory." 
def valid_for_filename(value: str, replacement: str = "") -> str:
    """Replace non alpha numeric characters."""
    return re.sub(r"[^\w.]", replacement, value, flags=re.ASCII)


# Mapping backend -> device_type -> system_name
_SUPPORTED_SYSTEMS = {
    "Corstone-300": {
        "ethos-u55": "Corstone-300: Cortex-M55+Ethos-U55",
        "ethos-u65": "Corstone-300: Cortex-M55+Ethos-U65",
    },
    "Corstone-310": {
        "ethos-u55": "Corstone-310: Cortex-M85+Ethos-U55",
        "ethos-u65": "Corstone-310: Cortex-M85+Ethos-U65",
    },
}

# Mapping system_name -> application
_SYSTEM_TO_APP_MAP = {
    "Corstone-300: Cortex-M55+Ethos-U55": "Generic Inference Runner: Ethos-U55",
    "Corstone-300: Cortex-M55+Ethos-U65": "Generic Inference Runner: Ethos-U65",
    "Corstone-310: Cortex-M85+Ethos-U55": "Generic Inference Runner: Ethos-U55",
    "Corstone-310: Cortex-M85+Ethos-U65": "Generic Inference Runner: Ethos-U65",
}


def get_system_name(backend: str, device_type: str) -> str:
    """Get the system name for the given backend and device type."""
    return _SUPPORTED_SYSTEMS[backend][device_type]


def get_application_name(system_name: str) -> str:
    """Get application name for the provided system name."""
    return _SYSTEM_TO_APP_MAP[system_name]


def is_supported(backend: str, device_type: str | None = None) -> bool:
    """Check if the backend (and optionally device type) is supported."""
    device_map = _SUPPORTED_SYSTEMS.get(backend)
    if device_map is None:
        return False

    return device_type is None or device_type in device_map


def supported_backends() -> list[str]:
    """Get a list of all backends supported by the backend manager."""
    return list(_SUPPORTED_SYSTEMS)


def get_all_system_names(backend: str) -> list[str]:
    """Get all systems supported by the backend."""
    return list(_SUPPORTED_SYSTEMS.get(backend, {}).values())


def get_all_application_names(backend: str) -> list[str]:
    """Get all applications supported by the backend."""
    unique_apps = {
        _SYSTEM_TO_APP_MAP[system] for system in get_all_system_names(backend)
    }
    return list(unique_apps)


@dataclass
class InstallFromPath:
    """Installation from the local path."""

    backend_path: Path


@dataclass
class DownloadAndInstall:
    """Download and install."""

    eula_agreement: bool = True


InstallationType = Union[InstallFromPath, DownloadAndInstall]
@dataclass
class BackendInfo:
    """Backend information resolved by a path checker."""

    backend_path: Path
    copy_source: bool = True
    system_config: str | None = None


PathChecker = Callable[[Path], Optional[BackendInfo]]
BackendInstaller = Callable[[bool, Path], Path]


class BackendMetadata:
    """Backend installation metadata."""

    def __init__(
        self,
        name: str,
        description: str,
        system_config: str,
        apps_resources: list[str],
        fvp_dir_name: str,
        download_artifact: DownloadArtifact | None,
        supported_platforms: list[str] | None = None,
    ) -> None:
        """
        Initialize BackendMetadata.

        Members expected_systems and expected_apps are filled automatically.
        """
        self.name = name
        self.description = description
        self.system_config = system_config
        self.apps_resources = apps_resources
        self.fvp_dir_name = fvp_dir_name
        self.download_artifact = download_artifact
        self.supported_platforms = supported_platforms

        # Derived from the backend name via the supported-systems mapping.
        self.expected_systems = get_all_system_names(name)
        self.expected_apps = get_all_application_names(name)

    @property
    def expected_resources(self) -> Iterable[Path]:
        """Return list of expected resources."""
        all_resources = [self.system_config, *self.apps_resources]
        return (get_mlia_resources() / resource for resource in all_resources)

    @property
    def supported_platform(self) -> bool:
        """Return true if current platform supported."""
        # An empty/None list means "no restriction".
        return (
            not self.supported_platforms
            or platform.system() in self.supported_platforms
        )
true if backends supported type of the installation.""" + if isinstance(install_type, DownloadAndInstall): + return self.metadata.download_artifact is not None + + if isinstance(install_type, InstallFromPath): + return self.path_checker(install_type.backend_path) is not None + + return False # type: ignore + + def install(self, install_type: InstallationType) -> None: + """Install the backend.""" + if isinstance(install_type, DownloadAndInstall): + download_artifact = self.metadata.download_artifact + assert download_artifact is not None, "No artifact provided" + + self.download_and_install(download_artifact, install_type.eula_agreement) + elif isinstance(install_type, InstallFromPath): + backend_path = self.path_checker(install_type.backend_path) + assert backend_path is not None, "Unable to resolve backend path" + + self.install_from(backend_path) + else: + raise Exception(f"Unable to install {install_type}") + + def install_from(self, backend_info: BackendInfo) -> None: + """Install backend from the directory.""" + mlia_resources = get_mlia_resources() + + with temp_directory() as tmpdir: + fvp_dist_dir = tmpdir / self.metadata.fvp_dir_name + + system_config = self.metadata.system_config + if backend_info.system_config: + system_config = backend_info.system_config + + resources_to_copy = [mlia_resources / system_config] + if backend_info.copy_source: + resources_to_copy.append(backend_info.backend_path) + + copy_all(*resources_to_copy, dest=fvp_dist_dir) + + self.backend_runner.install_system(fvp_dist_dir) + + for app in self.metadata.apps_resources: + self.backend_runner.install_application(mlia_resources / app) + + def download_and_install( + self, download_artifact: DownloadArtifact, eula_agrement: bool + ) -> None: + """Download and install the backend.""" + with temp_directory() as tmpdir: + try: + downloaded_to = download_artifact.download_to(tmpdir) + except Exception as err: + raise Exception("Unable to download backend artifact") from err + + with 
class PackagePathChecker:
    """Path checker for an unpacked backend package."""

    def __init__(
        self, expected_files: list[str], backend_subfolder: str | None = None
    ) -> None:
        """Init the path checker."""
        self.expected_files = expected_files
        self.backend_subfolder = backend_subfolder

    def __call__(self, backend_path: Path) -> BackendInfo | None:
        """Return backend info when the directory has all expected files."""
        if not all_files_exist(backend_path / file for file in self.expected_files):
            return None

        if not self.backend_subfolder:
            return BackendInfo(backend_path)

        # When a subfolder is configured it must exist and becomes the
        # effective backend path.
        subfolder = backend_path / self.backend_subfolder
        if not subfolder.is_dir():
            return None

        return BackendInfo(subfolder)
class CompoundPathChecker:
    """Path checker that delegates to a sequence of checkers."""

    def __init__(self, *path_checkers: PathChecker) -> None:
        """Init compound path checker."""
        self.path_checkers = path_checkers

    def __call__(self, backend_path: Path) -> BackendInfo | None:
        """Return the first non-empty backend info produced by the checkers."""
        for path_checker in self.path_checkers:
            backend_info = path_checker(backend_path)
            if backend_info is not None:
                return backend_info

        return None


class PyPackageBackendInstallation(Installation):
    """Backend based on the python package."""

    def __init__(
        self,
        name: str,
        description: str,
        packages_to_install: list[str],
        packages_to_uninstall: list[str],
        expected_packages: list[str],
    ) -> None:
        """Init the backend installation."""
        self._name = name
        self._description = description
        self._packages_to_install = packages_to_install
        self._packages_to_uninstall = packages_to_uninstall
        self._expected_packages = expected_packages

        self.package_manager = get_package_manager()

    @property
    def name(self) -> str:
        """Return name of the backend."""
        return self._name

    @property
    def description(self) -> str:
        """Return description of the backend."""
        return self._description

    @property
    def could_be_installed(self) -> bool:
        """A python package backend can always be installed."""
        return True

    @property
    def already_installed(self) -> bool:
        """Return True when all expected packages are present."""
        return self.package_manager.packages_installed(self._expected_packages)

    def supports(self, install_type: InstallationType) -> bool:
        """Only download-and-install is supported for python packages."""
        return isinstance(install_type, DownloadAndInstall)

    def install(self, install_type: InstallationType) -> None:
        """Install the python packages for this backend."""
        if not self.supports(install_type):
            raise Exception(f"Unsupported installation type {install_type}")

        self.package_manager.install(self._packages_to_install)

    def uninstall(self) -> None:
        """Uninstall the python packages for this backend."""
        self.package_manager.uninstall(self._packages_to_uninstall)
"Corstone-310": { - "ethos-u55": "Corstone-310: Cortex-M85+Ethos-U55", - "ethos-u65": "Corstone-310: Cortex-M85+Ethos-U65", - }, -} +InstallationFilter = Callable[[Installation], bool] -# Mapping system_name -> application -_SYSTEM_TO_APP_MAP = { - "Corstone-300: Cortex-M55+Ethos-U55": "Generic Inference Runner: Ethos-U55", - "Corstone-300: Cortex-M55+Ethos-U65": "Generic Inference Runner: Ethos-U65", - "Corstone-310: Cortex-M85+Ethos-U55": "Generic Inference Runner: Ethos-U55", - "Corstone-310: Cortex-M85+Ethos-U65": "Generic Inference Runner: Ethos-U65", -} +class AlreadyInstalledFilter: + """Filter for already installed backends.""" -def get_system_name(backend: str, device_type: str) -> str: - """Get the system name for the given backend and device type.""" - return _SUPPORTED_SYSTEMS[backend][device_type] + def __call__(self, installation: Installation) -> bool: + """Installation filter.""" + return installation.already_installed -def is_supported(backend: str, device_type: str | None = None) -> bool: - """Check if the backend (and optionally device type) is supported.""" - if device_type is None: - return backend in _SUPPORTED_SYSTEMS +class ReadyForInstallationFilter: + """Filter for ready to be installed backends.""" - try: - get_system_name(backend, device_type) - return True - except KeyError: - return False + def __call__(self, installation: Installation) -> bool: + """Installation filter.""" + return installation.could_be_installed and not installation.already_installed -def supported_backends() -> list[str]: - """Get a list of all backends supported by the backend manager.""" - return list(_SUPPORTED_SYSTEMS.keys()) +class SupportsInstallTypeFilter: + """Filter backends that support certain type of the installation.""" + def __init__(self, installation_type: InstallationType) -> None: + """Init filter.""" + self.installation_type = installation_type -def get_all_system_names(backend: str) -> list[str]: - """Get all systems supported by the backend.""" 
- return list(_SUPPORTED_SYSTEMS.get(backend, {}).values()) + def __call__(self, installation: Installation) -> bool: + """Installation filter.""" + return installation.supports(self.installation_type) -def get_all_application_names(backend: str) -> list[str]: - """Get all applications supported by the backend.""" - app_set = {_SYSTEM_TO_APP_MAP[sys] for sys in get_all_system_names(backend)} - return list(app_set) +class SearchByNameFilter: + """Filter installation by name.""" + def __init__(self, backend_name: str | None) -> None: + """Init filter.""" + self.backend_name = backend_name -@dataclass -class DeviceInfo: - """Device information.""" - - device_type: Literal["ethos-u55", "ethos-u65"] - mac: int - - -@dataclass -class ModelInfo: - """Model info.""" - - model_path: Path - - -@dataclass -class PerformanceMetrics: - """Performance metrics parsed from generic inference output.""" - - npu_active_cycles: int - npu_idle_cycles: int - npu_total_cycles: int - npu_axi0_rd_data_beat_received: int - npu_axi0_wr_data_beat_written: int - npu_axi1_rd_data_beat_received: int - - -@dataclass -class ExecutionParams: - """Application execution params.""" - - application: str - system: str - application_params: list[str] - system_params: list[str] - - -class LogWriter(OutputConsumer): - """Redirect output to the logger.""" - - def feed(self, line: str) -> bool: - """Process line from the output.""" - logger.debug(line.strip()) - return False + def __call__(self, installation: Installation) -> bool: + """Installation filter.""" + return ( + not self.backend_name + or installation.name.casefold() == self.backend_name.casefold() + ) -class GenericInferenceOutputParser(Base64OutputConsumer): - """Generic inference app output parser.""" +class InstallationManager(ABC): + """Helper class for managing installations.""" - def __init__(self) -> None: - """Init generic inference output parser instance.""" - super().__init__() - self._map = { - "NPU ACTIVE": "npu_active_cycles", - "NPU 
IDLE": "npu_idle_cycles", - "NPU TOTAL": "npu_total_cycles", - "NPU AXI0_RD_DATA_BEAT_RECEIVED": "npu_axi0_rd_data_beat_received", - "NPU AXI0_WR_DATA_BEAT_WRITTEN": "npu_axi0_wr_data_beat_written", - "NPU AXI1_RD_DATA_BEAT_RECEIVED": "npu_axi1_rd_data_beat_received", - } + @abstractmethod + def install_from(self, backend_path: Path, backend_name: str, force: bool) -> None: + """Install backend from the local directory.""" - @property - def result(self) -> dict: - """Merge the raw results and map the names to the right output names.""" - merged_result = {} - for raw_result in self.parsed_output: - for profiling_result in raw_result: - for sample in profiling_result["samples"]: - name, values = (sample["name"], sample["value"]) - if name in merged_result: - raise KeyError( - f"Duplicate key '{name}' in base64 output.", - ) - new_name = self._map[name] - merged_result[new_name] = values[0] - return merged_result + @abstractmethod + def download_and_install( + self, backend_name: str, eula_agreement: bool, force: bool + ) -> None: + """Download and install backends.""" - def is_ready(self) -> bool: - """Return true if all expected data has been parsed.""" - return set(self.result.keys()) == set(self._map.values()) + @abstractmethod + def show_env_details(self) -> None: + """Show environment details.""" - def missed_keys(self) -> set[str]: - """Return a set of the keys that have not been found in the output.""" - return set(self._map.values()) - set(self.result.keys()) + @abstractmethod + def backend_installed(self, backend_name: str) -> bool: + """Return true if requested backend installed.""" + @abstractmethod + def uninstall(self, backend_name: str) -> None: + """Delete the existing installation.""" -class BackendRunner: - """Backend runner.""" - def __init__(self) -> None: - """Init BackendRunner instance.""" +class InstallationFiltersMixin: + """Mixin for filtering installation based on different conditions.""" - @staticmethod - def get_installed_systems() -> 
list[str]: - """Get list of the installed systems.""" - return [system.name for system in get_available_systems()] + installations: list[Installation] - @staticmethod - def get_installed_applications(system: str | None = None) -> list[str]: - """Get list of the installed application.""" + def filter_by(self, *filters: InstallationFilter) -> list[Installation]: + """Filter installations.""" return [ - app.name - for app in get_available_applications() - if system is None or app.can_run_on(system) + installation + for installation in self.installations + if all(filter_(installation) for filter_ in filters) ] - def is_application_installed(self, application: str, system: str) -> bool: - """Return true if requested application installed.""" - return application in self.get_installed_applications(system) - - def is_system_installed(self, system: str) -> bool: - """Return true if requested system installed.""" - return system in self.get_installed_systems() - - def systems_installed(self, systems: list[str]) -> bool: - """Check if all provided systems are installed.""" - if not systems: - return False - - installed_systems = self.get_installed_systems() - return all(system in installed_systems for system in systems) - - def applications_installed(self, applications: list[str]) -> bool: - """Check if all provided applications are installed.""" - if not applications: - return False - - installed_apps = self.get_installed_applications() - return all(app in installed_apps for app in applications) + def find_by_name(self, backend_name: str) -> list[Installation]: + """Return list of the backends filtered by name.""" + return self.filter_by(SearchByNameFilter(backend_name)) - def all_installed(self, systems: list[str], apps: list[str]) -> bool: - """Check if all provided artifacts are installed.""" - return self.systems_installed(systems) and self.applications_installed(apps) - - @staticmethod - def install_system(system_path: Path) -> None: - """Install system.""" - 
install_system(system_path) - - @staticmethod - def install_application(app_path: Path) -> None: - """Install application.""" - install_application(app_path) - - @staticmethod - def run_application(execution_params: ExecutionParams) -> ExecutionContext: - """Run requested application.""" - ctx = run_application( - execution_params.application, - execution_params.application_params, - execution_params.system, - execution_params.system_params, + def already_installed(self, backend_name: str = None) -> list[Installation]: + """Return list of backends that are already installed.""" + return self.filter_by( + AlreadyInstalledFilter(), + SearchByNameFilter(backend_name), ) - return ctx - @staticmethod - def _params(name: str, params: list[str]) -> list[str]: - return [p for item in [(name, param) for param in params] for p in item] + def ready_for_installation(self) -> list[Installation]: + """Return list of the backends that could be installed.""" + return self.filter_by(ReadyForInstallationFilter()) -class GenericInferenceRunner(ABC): - """Abstract class for generic inference runner.""" +class DefaultInstallationManager(InstallationManager, InstallationFiltersMixin): + """Interactive installation manager.""" - def __init__(self, backend_runner: BackendRunner): - """Init generic inference runner instance.""" - self.backend_runner = backend_runner - - def run( - self, model_info: ModelInfo, output_consumers: list[OutputConsumer] + def __init__( + self, installations: list[Installation], noninteractive: bool = False ) -> None: - """Run generic inference for the provided device/model.""" - execution_params = self.get_execution_params(model_info) - - ctx = self.backend_runner.run_application(execution_params) - if ctx.stdout is not None: - ctx.stdout = self.consume_output(ctx.stdout, output_consumers) - - @abstractmethod - def get_execution_params(self, model_info: ModelInfo) -> ExecutionParams: - """Get execution params for the provided model.""" - - def 
check_system_and_application(self, system_name: str, app_name: str) -> None: - """Check if requested system and application installed.""" - if not self.backend_runner.is_system_installed(system_name): - raise Exception(f"System {system_name} is not installed") - - if not self.backend_runner.is_application_installed(app_name, system_name): - raise Exception( - f"Application {app_name} for the system {system_name} " - "is not installed" + """Init the manager.""" + self.installations = installations + self.noninteractive = noninteractive + + def _install( + self, + backend_name: str, + install_type: InstallationType, + prompt: Callable[[Installation], str], + force: bool, + ) -> None: + """Check metadata and install backend.""" + installs = self.find_by_name(backend_name) + + if not installs: + logger.info("Unknown backend '%s'.", backend_name) + logger.info( + "Please run command 'mlia-backend list' to get list of " + "supported backend names." ) - @staticmethod - def consume_output(output: bytearray, consumers: list[OutputConsumer]) -> bytearray: - """ - Pass program's output to the consumers and filter it. + return + + if len(installs) > 1: + raise InternalError(f"More than one backend with name {backend_name} found") + + installation = installs[0] + if not installation.supports(install_type): + if isinstance(install_type, InstallFromPath): + logger.info( + "Backend '%s' could not be installed using path '%s'.", + installation.name, + install_type.backend_path, + ) + logger.info( + "Please check that '%s' is a valid path to the installed backend.", + install_type.backend_path, + ) + else: + logger.info( + "Backend '%s' could not be downloaded and installed", + installation.name, + ) + logger.info( + "Please refer to the project's documentation for more details." 
+ ) + + return + + if installation.already_installed and not force: + logger.info("Backend '%s' is already installed.", installation.name) + logger.info("Please, consider using --force option.") + return + + proceed = self.noninteractive or yes(prompt(installation)) + if not proceed: + logger.info("%s installation canceled.", installation.name) + return + + if installation.already_installed and force: + logger.info( + "Force installing %s, so delete the existing " + "installed backend first.", + installation.name, + ) + installation.uninstall() - Returns the filtered output. - """ - filtered_output = bytearray() - for line_bytes in output.splitlines(): - line = line_bytes.decode("utf-8") - remove_line = False - for consumer in consumers: - if consumer.feed(line): - remove_line = True - if not remove_line: - filtered_output.extend(line_bytes) + installation.install(install_type) + logger.info("%s successfully installed.", installation.name) - return filtered_output + def install_from( + self, backend_path: Path, backend_name: str, force: bool = False + ) -> None: + """Install from the provided directory.""" + def prompt(install: Installation) -> str: + return ( + f"{install.name} was found in {backend_path}. " + "Would you like to install it?" 
+ ) -class GenericInferenceRunnerEthosU(GenericInferenceRunner): - """Generic inference runner on U55/65.""" + install_type = InstallFromPath(backend_path) + self._install(backend_name, install_type, prompt, force) - def __init__( - self, backend_runner: BackendRunner, device_info: DeviceInfo, backend: str + def download_and_install( + self, backend_name: str, eula_agreement: bool = True, force: bool = False ) -> None: - """Init generic inference runner instance.""" - super().__init__(backend_runner) + """Download and install available backends.""" - system_name, app_name = self.resolve_system_and_app(device_info, backend) - self.system_name = system_name - self.app_name = app_name - self.device_info = device_info + def prompt(install: Installation) -> str: + return f"Would you like to download and install {install.name}?" - @staticmethod - def resolve_system_and_app( - device_info: DeviceInfo, backend: str - ) -> tuple[str, str]: - """Find appropriate system and application for the provided device/backend.""" - try: - system_name = get_system_name(backend, device_info.device_type) - except KeyError as ex: - raise RuntimeError( - f"Unsupported device {device_info.device_type} " - f"for backend {backend}" - ) from ex - - try: - app_name = _SYSTEM_TO_APP_MAP[system_name] - except KeyError as err: - raise RuntimeError(f"System {system_name} is not installed") from err - - return system_name, app_name - - def get_execution_params(self, model_info: ModelInfo) -> ExecutionParams: - """Get execution params for Ethos-U55/65.""" - self.check_system_and_application(self.system_name, self.app_name) - - system_params = [ - f"mac={self.device_info.mac}", - f"input_file={model_info.model_path.absolute()}", - ] + install_type = DownloadAndInstall(eula_agreement=eula_agreement) + self._install(backend_name, install_type, prompt, force) - return ExecutionParams( - self.app_name, - self.system_name, - [], - system_params, - ) + def show_env_details(self) -> None: + """Print current 
state of the execution environment.""" + if installed := self.already_installed(): + self._print_installation_list("Installed backends:", installed) + + if could_be_installed := self.ready_for_installation(): + self._print_installation_list( + "Following backends could be installed:", + could_be_installed, + new_section=bool(installed), + ) + if not installed and not could_be_installed: + logger.info("No backends installed") -def get_generic_runner(device_info: DeviceInfo, backend: str) -> GenericInferenceRunner: - """Get generic runner for provided device and backend.""" - backend_runner = get_backend_runner() - return GenericInferenceRunnerEthosU(backend_runner, device_info, backend) + @staticmethod + def _print_installation_list( + header: str, installations: list[Installation], new_section: bool = False + ) -> None: + """Print list of the installations.""" + logger.info("%s%s\n", "\n" if new_section else "", header) + for installation in installations: + logger.info(" - %s", installation.name) -def estimate_performance( - model_info: ModelInfo, device_info: DeviceInfo, backend: str -) -> PerformanceMetrics: - """Get performance estimations.""" - output_parser = GenericInferenceOutputParser() - output_consumers = [output_parser, LogWriter()] + def uninstall(self, backend_name: str) -> None: + """Uninstall the backend with name backend_name.""" + installations = self.already_installed(backend_name) - generic_runner = get_generic_runner(device_info, backend) - generic_runner.run(model_info, output_consumers) + if not installations: + raise ConfigurationError(f"Backend '{backend_name}' is not installed") - if not output_parser.is_ready(): - missed_data = ",".join(output_parser.missed_keys()) - logger.debug("Unable to get performance metrics, missed data %s", missed_data) - raise Exception("Unable to get performance metrics, insufficient data") + if len(installations) != 1: + raise InternalError( + f"More than one installed backend with name {backend_name} found" + 
) - return PerformanceMetrics(**output_parser.result) + installation = installations[0] + installation.uninstall() + logger.info("%s successfully uninstalled.", installation.name) -def get_backend_runner() -> BackendRunner: - """ - Return BackendRunner instance. + def backend_installed(self, backend_name: str) -> bool: + """Return true if requested backend installed.""" + installations = self.already_installed(backend_name) - Note: This is needed for the unit tests. - """ - return BackendRunner() + return len(installations) == 1 diff --git a/src/mlia/backend/output_consumer.py b/src/mlia/backend/output_consumer.py deleted file mode 100644 index 3c3b132..0000000 --- a/src/mlia/backend/output_consumer.py +++ /dev/null @@ -1,67 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Output consumers module.""" -from __future__ import annotations - -import base64 -import json -import re -from typing import Protocol -from typing import runtime_checkable - - -@runtime_checkable -class OutputConsumer(Protocol): - """Protocol to consume output.""" - - def feed(self, line: str) -> bool: - """ - Feed a new line to be parsed. - - Return True if the line should be removed from the output. - """ - - -class Base64OutputConsumer(OutputConsumer): - """ - Parser to extract base64-encoded JSON from tagged standard output. - - Example of the tagged output: - ``` - # Encoded JSON: {"test": 1234} - eyJ0ZXN0IjogMTIzNH0 - ``` - """ - - TAG_NAME = "metrics" - - def __init__(self) -> None: - """Set up the regular expression to extract tagged strings.""" - self._regex = re.compile(rf"<{self.TAG_NAME}>(.*)") - self.parsed_output: list = [] - - def feed(self, line: str) -> bool: - """ - Parse the output line and save the decoded output. - - Returns True if the line contains tagged output. 
- - Example: - Using the tagged output from the class docs the parser should collect - the following: - ``` - [ - {"test": 1234} - ] - ``` - """ - res_b64 = self._regex.search(line) - if res_b64: - res_json = base64.b64decode(res_b64.group(1), validate=True) - res = json.loads(res_json) - self.parsed_output.append(res) - # Remove this line from the output, i.e. consume it, as it - # does not contain any human readable content. - return True - - return False diff --git a/src/mlia/backend/proc.py b/src/mlia/backend/proc.py deleted file mode 100644 index 4838e47..0000000 --- a/src/mlia/backend/proc.py +++ /dev/null @@ -1,191 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Processes module. - -This module contains all classes and functions for dealing with Linux -processes. -""" -from __future__ import annotations - -import datetime -import logging -import shlex -import signal -import tempfile -import time -from pathlib import Path -from typing import Any - -from sh import Command -from sh import CommandNotFound -from sh import ErrorReturnCode -from sh import RunningCommand - -from mlia.backend.fs import valid_for_filename - -logger = logging.getLogger(__name__) - - -class CommandFailedException(Exception): - """Exception for failed command execution.""" - - -class ShellCommand: - """Wrapper class for shell commands.""" - - def run( - self, - cmd: str, - *args: str, - _cwd: Path | None = None, - _tee: bool = True, - _bg: bool = True, - _out: Any = None, - _err: Any = None, - _search_paths: list[Path] | None = None, - ) -> RunningCommand: - """Run the shell command with the given arguments. - - There are special arguments that modify the behaviour of the process. - _cwd: current working directory - _tee: it redirects the stdout both to console and file - _bg: if True, it runs the process in background and the command is not - blocking. 
- _out: use this object for stdout redirect, - _err: use this object for stderr redirect, - _search_paths: If presented used for searching executable - """ - try: - kwargs = {} - if _cwd: - kwargs["_cwd"] = str(_cwd) - command = Command(cmd, _search_paths).bake(args, **kwargs) - except CommandNotFound as error: - logging.error("Command '%s' not found", error.args[0]) - raise error - - out, err = _out, _err - if not _out and not _err: - out, err = (str(item) for item in self.get_stdout_stderr_paths(cmd)) - - return command(_out=out, _err=err, _tee=_tee, _bg=_bg, _bg_exc=False) - - @classmethod - def get_stdout_stderr_paths(cls, cmd: str) -> tuple[Path, Path]: - """Construct and returns the paths of stdout/stderr files.""" - timestamp = datetime.datetime.now().timestamp() - base_path = Path(tempfile.mkdtemp(prefix="mlia-", suffix=f"{timestamp}")) - base = base_path / f"{valid_for_filename(cmd, '_')}_{timestamp}" - stdout = base.with_suffix(".out") - stderr = base.with_suffix(".err") - try: - stdout.touch() - stderr.touch() - except FileNotFoundError as error: - logging.error("File not found: %s", error.filename) - raise error - return stdout, stderr - - -def parse_command(command: str, shell: str = "bash") -> list[str]: - """Parse command.""" - cmd, *args = shlex.split(command, posix=True) - - if is_shell_script(cmd): - args = [cmd] + args - cmd = shell - - return [cmd] + args - - -def execute_command( # pylint: disable=invalid-name - command: str, - cwd: Path, - bg: bool = False, - shell: str = "bash", - out: Any = None, - err: Any = None, -) -> RunningCommand: - """Execute shell command.""" - cmd, *args = parse_command(command, shell) - - search_paths = None - if cmd != shell and (cwd / cmd).is_file(): - search_paths = [cwd] - - return ShellCommand().run( - cmd, *args, _cwd=cwd, _bg=bg, _search_paths=search_paths, _out=out, _err=err - ) - - -def is_shell_script(cmd: str) -> bool: - """Check if command is shell script.""" - return cmd.endswith(".sh") - - -def 
run_and_wait( - command: str, - cwd: Path, - terminate_on_error: bool = False, - out: Any = None, - err: Any = None, -) -> tuple[int, bytearray, bytearray]: - """ - Run command and wait while it is executing. - - Returns a tuple: (exit_code, stdout, stderr) - """ - running_cmd: RunningCommand | None = None - try: - running_cmd = execute_command(command, cwd, bg=True, out=out, err=err) - return running_cmd.exit_code, running_cmd.stdout, running_cmd.stderr - except ErrorReturnCode as cmd_failed: - raise CommandFailedException() from cmd_failed - except Exception as error: - is_running = running_cmd is not None and running_cmd.is_alive() - if terminate_on_error and is_running: - logger.debug("Terminating ...") - terminate_command(running_cmd) - - raise error - - -def terminate_command( - running_cmd: RunningCommand, - wait: bool = True, - wait_period: float = 0.5, - number_of_attempts: int = 20, -) -> None: - """Terminate running command.""" - try: - running_cmd.process.signal_group(signal.SIGINT) - if wait: - for _ in range(number_of_attempts): - time.sleep(wait_period) - if not running_cmd.is_alive(): - return - logger.error( - "Unable to terminate process %i. Sending SIGTERM...", - running_cmd.process.pid, - ) - running_cmd.process.signal_group(signal.SIGTERM) - except ProcessLookupError: - pass - - -def print_command_stdout(command: RunningCommand) -> None: - """Print the stdout of a command. - - The command has 2 states: running and done. - If the command is running, the output is taken by the running process. 
- If the command has ended its execution, the stdout is taken from stdout - property - """ - if command.is_alive(): - while True: - try: - print(command.next(), end="") - except StopIteration: - break - else: - print(command.stdout) diff --git a/src/mlia/backend/source.py b/src/mlia/backend/source.py deleted file mode 100644 index c951eae..0000000 --- a/src/mlia/backend/source.py +++ /dev/null @@ -1,207 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Contain source related classes and functions.""" -from __future__ import annotations - -import os -import shutil -import tarfile -from abc import ABC -from abc import abstractmethod -from pathlib import Path -from tarfile import TarFile - -from mlia.backend.common import BACKEND_CONFIG_FILE -from mlia.backend.common import ConfigurationException -from mlia.backend.common import get_backend_config -from mlia.backend.common import is_backend_directory -from mlia.backend.common import load_config -from mlia.backend.config import BackendConfig -from mlia.backend.fs import copy_directory_content - - -class Source(ABC): - """Source class.""" - - @abstractmethod - def name(self) -> str | None: - """Get source name.""" - - @abstractmethod - def config(self) -> BackendConfig | None: - """Get configuration file content.""" - - @abstractmethod - def install_into(self, destination: Path) -> None: - """Install source into destination directory.""" - - @abstractmethod - def create_destination(self) -> bool: - """Return True if destination folder should be created before installation.""" - - -class DirectorySource(Source): - """DirectorySource class.""" - - def __init__(self, directory_path: Path) -> None: - """Create the DirectorySource instance.""" - assert isinstance(directory_path, Path) - self.directory_path = directory_path - - def name(self) -> str: - """Return name of source.""" - return self.directory_path.name - - def config(self) -> 
BackendConfig | None: - """Return configuration file content.""" - if not is_backend_directory(self.directory_path): - raise ConfigurationException("No configuration file found") - - config_file = get_backend_config(self.directory_path) - return load_config(config_file) - - def install_into(self, destination: Path) -> None: - """Install source into destination directory.""" - if not destination.is_dir(): - raise ConfigurationException(f"Wrong destination {destination}.") - - if not self.directory_path.is_dir(): - raise ConfigurationException( - f"Directory {self.directory_path} does not exist." - ) - - copy_directory_content(self.directory_path, destination) - - def create_destination(self) -> bool: - """Return True if destination folder should be created before installation.""" - return True - - -class TarArchiveSource(Source): - """TarArchiveSource class.""" - - def __init__(self, archive_path: Path) -> None: - """Create the TarArchiveSource class.""" - assert isinstance(archive_path, Path) - self.archive_path = archive_path - self._config: BackendConfig | None = None - self._has_top_level_folder: bool | None = None - self._name: str | None = None - - def _read_archive_content(self) -> None: - """Read various information about archive.""" - # get source name from archive name (everything without extensions) - extensions = "".join(self.archive_path.suffixes) - self._name = self.archive_path.name.rstrip(extensions) - - if not self.archive_path.exists(): - return - - with self._open(self.archive_path) as archive: - try: - config_entry = archive.getmember(BACKEND_CONFIG_FILE) - self._has_top_level_folder = False - except KeyError as error_no_config: - try: - archive_entries = archive.getnames() - entries_common_prefix = os.path.commonprefix(archive_entries) - top_level_dir = entries_common_prefix.rstrip("/") - - if not top_level_dir: - raise RuntimeError( - "Archive has no top level directory" - ) from error_no_config - - config_path = 
f"{top_level_dir}/{BACKEND_CONFIG_FILE}" - - config_entry = archive.getmember(config_path) - self._has_top_level_folder = True - self._name = top_level_dir - except (KeyError, RuntimeError) as error_no_root_dir_or_config: - raise ConfigurationException( - "No configuration file found" - ) from error_no_root_dir_or_config - - content = archive.extractfile(config_entry) - self._config = load_config(content) - - def config(self) -> BackendConfig | None: - """Return configuration file content.""" - if self._config is None: - self._read_archive_content() - - return self._config - - def name(self) -> str | None: - """Return name of the source.""" - if self._name is None: - self._read_archive_content() - - return self._name - - def create_destination(self) -> bool: - """Return True if destination folder must be created before installation.""" - if self._has_top_level_folder is None: - self._read_archive_content() - - return not self._has_top_level_folder - - def install_into(self, destination: Path) -> None: - """Install source into destination directory.""" - if not destination.is_dir(): - raise ConfigurationException(f"Wrong destination {destination}.") - - with self._open(self.archive_path) as archive: - archive.extractall(destination) - - def _open(self, archive_path: Path) -> TarFile: - """Open archive file.""" - if not archive_path.is_file(): - raise ConfigurationException(f"File {archive_path} does not exist.") - - if archive_path.name.endswith("tar.gz") or archive_path.name.endswith("tgz"): - mode = "r:gz" - else: - raise ConfigurationException(f"Unsupported archive type {archive_path}.") - - # The returned TarFile object can be used as a context manager (using - # 'with') by the calling instance. 
- return tarfile.open( # pylint: disable=consider-using-with - self.archive_path, mode=mode - ) - - -def get_source(source_path: Path) -> TarArchiveSource | DirectorySource: - """Return appropriate source instance based on provided source path.""" - if source_path.is_file(): - return TarArchiveSource(source_path) - - if source_path.is_dir(): - return DirectorySource(source_path) - - raise ConfigurationException(f"Unable to read {source_path}.") - - -def create_destination_and_install(source: Source, resource_path: Path) -> None: - """Create destination directory and install source. - - This function is used for actual installation of system/backend New - directory will be created inside :resource_path: if needed If for example - archive contains top level folder then no need to create new directory - """ - destination = resource_path - create_destination = source.create_destination() - - if create_destination: - name = source.name() - if not name: - raise ConfigurationException("Unable to get source name.") - - destination = resource_path / name - destination.mkdir() - try: - source.install_into(destination) - except Exception as error: - if create_destination: - shutil.rmtree(destination) - raise error diff --git a/src/mlia/backend/system.py b/src/mlia/backend/system.py deleted file mode 100644 index 0e51ab2..0000000 --- a/src/mlia/backend/system.py +++ /dev/null @@ -1,178 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""System backend module.""" -from __future__ import annotations - -from pathlib import Path -from typing import Any -from typing import cast -from typing import List - -from mlia.backend.common import Backend -from mlia.backend.common import ConfigurationException -from mlia.backend.common import get_backend_configs -from mlia.backend.common import get_backend_directories -from mlia.backend.common import load_config -from mlia.backend.common import remove_backend -from mlia.backend.config import SystemConfig -from mlia.backend.fs import get_backends_path -from mlia.backend.proc import run_and_wait -from mlia.backend.source import create_destination_and_install -from mlia.backend.source import get_source - - -class System(Backend): - """System class.""" - - def __init__(self, config: SystemConfig) -> None: - """Construct the System class using the dictionary passed.""" - super().__init__(config) - - self._setup_reporting(config) - - def _setup_reporting(self, config: SystemConfig) -> None: - self.reporting = config.get("reporting") - - def run(self, command: str) -> tuple[int, bytearray, bytearray]: - """ - Run command on the system. 
- - Returns a tuple: (exit_code, stdout, stderr) - """ - cwd = self.config_location - if not isinstance(cwd, Path) or not cwd.is_dir(): - raise ConfigurationException( - f"System has invalid config location: {cwd}", - ) - - stdout = bytearray() - stderr = bytearray() - - return run_and_wait( - command, - cwd=cwd, - terminate_on_error=True, - out=stdout, - err=stderr, - ) - - def __eq__(self, other: object) -> bool: - """Overload operator ==.""" - if not isinstance(other, System): - return False - - return super().__eq__(other) and self.name == other.name - - def get_details(self) -> dict[str, Any]: - """Return a dictionary with all relevant information of a System.""" - output = { - "type": "system", - "name": self.name, - "description": self.description, - "commands": self._get_command_details(), - "annotations": self.annotations, - } - - return output - - -def get_available_systems_directory_names() -> list[str]: - """Return a list of directory names for all avialable systems.""" - return [entry.name for entry in get_backend_directories("systems")] - - -def get_available_systems() -> list[System]: - """Return a list with all available systems.""" - available_systems = [] - for config_json in get_backend_configs("systems"): - config_entries = cast(List[SystemConfig], (load_config(config_json))) - for config_entry in config_entries: - config_entry["config_location"] = config_json.parent.absolute() - system = load_system(config_entry) - available_systems.append(system) - - return sorted(available_systems, key=lambda system: system.name) - - -def get_system(system_name: str) -> System: - """Return a system instance with the same name passed as argument.""" - available_systems = get_available_systems() - for system in available_systems: - if system_name == system.name: - return system - raise ConfigurationException(f"System '{system_name}' not found.") - - -def install_system(source_path: Path) -> None: - """Install new system.""" - try: - source = 
get_source(source_path) - config = cast(List[SystemConfig], source.config()) - systems_to_install = [load_system(entry) for entry in config] - except Exception as error: - raise ConfigurationException("Unable to read system definition") from error - - if not systems_to_install: - raise ConfigurationException("No system definition found") - - available_systems = get_available_systems() - already_installed = [s for s in systems_to_install if s in available_systems] - if already_installed: - names = [system.name for system in already_installed] - raise ConfigurationException( - f"Systems [{','.join(names)}] are already installed." - ) - - create_destination_and_install(source, get_backends_path("systems")) - - -def remove_system(directory_name: str) -> None: - """Remove system.""" - remove_backend(directory_name, "systems") - - -def load_system(config: SystemConfig) -> System: - """Load system based on it's execution type.""" - populate_shared_params(config) - - return System(config) - - -def populate_shared_params(config: SystemConfig) -> None: - """Populate command parameters with shared parameters.""" - user_params = config.get("user_params") - if not user_params or "shared" not in user_params: - return - - shared_user_params = user_params["shared"] - if not shared_user_params: - return - - only_aliases = all(p.get("alias") for p in shared_user_params) - if not only_aliases: - raise ConfigurationException("All shared parameters should have aliases") - - commands = config.get("commands", {}) - for cmd_name in ["run"]: - command = commands.get(cmd_name) - if command is None: - commands[cmd_name] = [] - cmd_user_params = user_params.get(cmd_name) - if not cmd_user_params: - cmd_user_params = shared_user_params - else: - only_aliases = all(p.get("alias") for p in cmd_user_params) - if not only_aliases: - raise ConfigurationException( - f"All parameters for command {cmd_name} should have aliases." 
- ) - merged_by_alias = { - **{p.get("alias"): p for p in shared_user_params}, - **{p.get("alias"): p for p in cmd_user_params}, - } - cmd_user_params = list(merged_by_alias.values()) - - user_params[cmd_name] = cmd_user_params - - config["commands"] = commands - del user_params["shared"] diff --git a/src/mlia/backend/tosa_checker/__init__.py b/src/mlia/backend/tosa_checker/__init__.py new file mode 100644 index 0000000..cec210d --- /dev/null +++ b/src/mlia/backend/tosa_checker/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""TOSA checker backend module.""" diff --git a/src/mlia/backend/tosa_checker/install.py b/src/mlia/backend/tosa_checker/install.py new file mode 100644 index 0000000..72454bc --- /dev/null +++ b/src/mlia/backend/tosa_checker/install.py @@ -0,0 +1,19 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Module for python package based installations.""" +from __future__ import annotations + +from mlia.backend.install import Installation +from mlia.backend.install import PyPackageBackendInstallation + + +def get_tosa_backend_installation() -> Installation: + """Get TOSA backend installation.""" + return PyPackageBackendInstallation( + name="tosa-checker", + description="Tool to check if a ML model is compatible " + "with the TOSA specification", + packages_to_install=["mlia[tosa]"], + packages_to_uninstall=["tosa-checker"], + expected_packages=["tosa-checker"], + ) diff --git a/src/mlia/backend/vela/__init__.py b/src/mlia/backend/vela/__init__.py new file mode 100644 index 0000000..6ea0c21 --- /dev/null +++ b/src/mlia/backend/vela/__init__.py @@ -0,0 +1,3 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Vela backend module.""" diff --git a/src/mlia/backend/vela/compat.py b/src/mlia/backend/vela/compat.py new file mode 100644 index 0000000..3ec42d1 --- /dev/null +++ b/src/mlia/backend/vela/compat.py @@ -0,0 +1,158 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Vela operator compatibility module.""" +from __future__ import annotations + +import itertools +import logging +from dataclasses import dataclass +from pathlib import Path + +from ethosu.vela.operation import Op +from ethosu.vela.tflite_mapping import optype_to_builtintype +from ethosu.vela.tflite_model_semantic import TFLiteSemantic +from ethosu.vela.tflite_supported_operators import TFLiteSupportedOperators +from ethosu.vela.vela import generate_supported_ops + +from mlia.backend.vela.compiler import VelaCompiler +from mlia.backend.vela.compiler import VelaCompilerOptions +from mlia.utils.logging import redirect_output + + +logger = logging.getLogger(__name__) + +VELA_INTERNAL_OPS = (Op.Placeholder, Op.SubgraphInput, Op.Const) + + +@dataclass +class NpuSupported: + """Operator's npu supported attribute.""" + + supported: bool + reasons: list[tuple[str, str]] + + +@dataclass +class Operator: + """Model operator.""" + + name: str + op_type: str + run_on_npu: NpuSupported + + @property + def cpu_only(self) -> bool: + """Return true if operator is CPU only.""" + cpu_only_reasons = [("CPU only operator", "")] + return ( + not self.run_on_npu.supported + and self.run_on_npu.reasons == cpu_only_reasons + ) + + +@dataclass +class Operators: + """Model's operators.""" + + ops: list[Operator] + + @property + def npu_supported_ratio(self) -> float: + """Return NPU supported ratio.""" + total = self.total_number + npu_supported = self.npu_supported_number + + if total == 0 or npu_supported == 0: + return 0 + + return npu_supported / total + + @property + def npu_unsupported_ratio(self) -> float: + 
"""Return NPU unsupported ratio.""" + return 1 - self.npu_supported_ratio + + @property + def total_number(self) -> int: + """Return total number of operators.""" + return len(self.ops) + + @property + def npu_supported_number(self) -> int: + """Return number of npu supported operators.""" + return sum(op.run_on_npu.supported for op in self.ops) + + +def supported_operators( + model_path: Path, compiler_options: VelaCompilerOptions +) -> Operators: + """Return list of model's operators.""" + logger.debug("Check supported operators for the model %s", model_path) + + vela_compiler = VelaCompiler(compiler_options) + initial_model = vela_compiler.read_model(model_path) + + return Operators( + [ + Operator(op.name, optype_to_builtintype(op.type), run_on_npu(op)) + for sg in initial_model.nng.subgraphs + for op in sg.get_all_ops() + if op.type not in VELA_INTERNAL_OPS + ] + ) + + +def run_on_npu(operator: Op) -> NpuSupported: + """Return information if operator can run on NPU. + + Vela does a number of checks that can help establish whether + a particular operator is supported to run on NPU. + + There are two groups of checks: + - general TensorFlow Lite constraints + - operator specific constraints + + If an operator is not supported on NPU then this function + will return the reason of that. 
+ + The reason is split in two parts: + - general description of why the operator cannot be placed on NPU + - details on the particular operator + """ + semantic_checker = TFLiteSemantic() + semantic_constraints = itertools.chain( + semantic_checker.generic_constraints, + semantic_checker.specific_constraints[operator.type], + ) + + for constraint in semantic_constraints: + op_valid, op_reason = constraint(operator) + if not op_valid: + return NpuSupported(False, [(constraint.__doc__, op_reason)]) + + if operator.type not in TFLiteSupportedOperators.supported_operators: + reasons = ( + [("CPU only operator", "")] + if operator.type not in VELA_INTERNAL_OPS + else [] + ) + + return NpuSupported(False, reasons) + + tflite_supported_operators = TFLiteSupportedOperators() + operation_constraints = itertools.chain( + tflite_supported_operators.generic_constraints, + tflite_supported_operators.specific_constraints[operator.type], + ) + for constraint in operation_constraints: + op_valid, op_reason = constraint(operator) + if not op_valid: + return NpuSupported(False, [(constraint.__doc__, op_reason)]) + + return NpuSupported(True, []) + + +def generate_supported_operators_report() -> None: + """Generate supported operators report in current working directory.""" + with redirect_output(logger): + generate_supported_ops() diff --git a/src/mlia/backend/vela/compiler.py b/src/mlia/backend/vela/compiler.py new file mode 100644 index 0000000..3d3847a --- /dev/null +++ b/src/mlia/backend/vela/compiler.py @@ -0,0 +1,274 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Vela compiler wrapper module.""" +from __future__ import annotations + +import logging +import sys +from dataclasses import dataclass +from pathlib import Path +from typing import Any +from typing import Literal + +from ethosu.vela.architecture_features import ArchitectureFeatures +from ethosu.vela.compiler_driver import compiler_driver +from ethosu.vela.compiler_driver import CompilerOptions +from ethosu.vela.compiler_driver import TensorAllocator +from ethosu.vela.model_reader import ModelReaderOptions +from ethosu.vela.model_reader import read_model +from ethosu.vela.nn_graph import Graph +from ethosu.vela.nn_graph import NetworkType +from ethosu.vela.operation import CustomType +from ethosu.vela.scheduler import OptimizationStrategy +from ethosu.vela.scheduler import SchedulerOptions +from ethosu.vela.tensor import BandwidthDirection +from ethosu.vela.tensor import MemArea +from ethosu.vela.tensor import Tensor +from ethosu.vela.tflite_writer import write_tflite + +from mlia.utils.logging import redirect_output + +logger = logging.getLogger(__name__) + + +@dataclass +class Model: + """Model metadata.""" + + nng: Graph + network_type: NetworkType + + @property + def optimized(self) -> bool: + """Return true if model is already optimized.""" + return any( + op.attrs.get("custom_type") == CustomType.ExistingNpuOp + for sg in self.nng.subgraphs + for op in sg.get_all_ops() + ) + + +@dataclass +class OptimizedModel: + """Instance of the Vela optimized model.""" + + nng: Graph + arch: ArchitectureFeatures + compiler_options: CompilerOptions + scheduler_options: SchedulerOptions + + def save(self, output_filename: str | Path) -> None: + """Save instance of the optimized model to the file.""" + write_tflite(self.nng, output_filename) + + +AcceleratorConfigType = Literal[ + "ethos-u55-32", + "ethos-u55-64", + "ethos-u55-128", + "ethos-u55-256", + "ethos-u65-256", + "ethos-u65-512", +] + +TensorAllocatorType = 
Literal["LinearAlloc", "Greedy", "HillClimb"] + +OptimizationStrategyType = Literal["Performance", "Size"] + + +@dataclass +class VelaCompilerOptions: # pylint: disable=too-many-instance-attributes + """Vela compiler options.""" + + config_files: str | list[str] | None = None + system_config: str = ArchitectureFeatures.DEFAULT_CONFIG + memory_mode: str = ArchitectureFeatures.DEFAULT_CONFIG + accelerator_config: AcceleratorConfigType | None = None + max_block_dependency: int = ArchitectureFeatures.MAX_BLOCKDEP + arena_cache_size: int | None = None + tensor_allocator: TensorAllocatorType = "HillClimb" + cpu_tensor_alignment: int = Tensor.AllocationQuantum + optimization_strategy: OptimizationStrategyType = "Performance" + output_dir: str | None = None + recursion_limit: int = 1000 + + +class VelaCompiler: # pylint: disable=too-many-instance-attributes + """Vela compiler wrapper.""" + + def __init__(self, compiler_options: VelaCompilerOptions): + """Init Vela wrapper instance.""" + self.config_files = compiler_options.config_files + self.system_config = compiler_options.system_config + self.memory_mode = compiler_options.memory_mode + self.accelerator_config = compiler_options.accelerator_config + self.max_block_dependency = compiler_options.max_block_dependency + self.arena_cache_size = compiler_options.arena_cache_size + self.tensor_allocator = TensorAllocator[compiler_options.tensor_allocator] + self.cpu_tensor_alignment = compiler_options.cpu_tensor_alignment + self.optimization_strategy = OptimizationStrategy[ + compiler_options.optimization_strategy + ] + self.output_dir = compiler_options.output_dir + self.recursion_limit = compiler_options.recursion_limit + + sys.setrecursionlimit(self.recursion_limit) + + def read_model(self, model: str | Path) -> Model: + """Read model.""" + logger.debug("Read model %s", model) + + nng, network_type = self._read_model(model) + return Model(nng, network_type) + + def compile_model(self, model: str | Path | Model) -> 
OptimizedModel: + """Compile the model.""" + if isinstance(model, (str, Path)): + nng, network_type = self._read_model(model) + else: + nng, network_type = model.nng, NetworkType.TFLite + + if not nng: + raise Exception("Unable to read model") + + try: + arch = self._architecture_features() + compiler_options = self._compiler_options() + scheduler_options = self._scheduler_options() + + with redirect_output( + logger, stdout_level=logging.DEBUG, stderr_level=logging.DEBUG + ): + compiler_driver( + nng, arch, compiler_options, scheduler_options, network_type + ) + + return OptimizedModel(nng, arch, compiler_options, scheduler_options) + except (SystemExit, Exception) as err: + raise Exception("Model could not be optimized with Vela compiler") from err + + def get_config(self) -> dict[str, Any]: + """Get compiler configuration.""" + arch = self._architecture_features() + + memory_area = { + mem.name: { + "clock_scales": arch.memory_clock_scales[mem], + "burst_length": arch.memory_burst_length[mem], + "read_latency": arch.memory_latency[mem][BandwidthDirection.Read], + "write_latency": arch.memory_latency[mem][BandwidthDirection.Write], + } + for mem in ( + MemArea.Sram, + MemArea.Dram, + MemArea.OnChipFlash, + MemArea.OffChipFlash, + ) + } + + return { + "accelerator_config": arch.accelerator_config.value, + "system_config": arch.system_config, + "core_clock": arch.core_clock, + "axi0_port": arch.axi0_port.name, + "axi1_port": arch.axi1_port.name, + "memory_mode": arch.memory_mode, + "const_mem_area": arch.const_mem_area.name, + "arena_mem_area": arch.arena_mem_area.name, + "cache_mem_area": arch.cache_mem_area.name, + "arena_cache_size": arch.arena_cache_size, + "permanent_storage_mem_area": arch.permanent_storage_mem_area.name, + "feature_map_storage_mem_area": arch.feature_map_storage_mem_area.name, + "fast_storage_mem_area": arch.fast_storage_mem_area.name, + "memory_area": memory_area, + } + + @staticmethod + def _read_model(model: str | Path) -> tuple[Graph, 
NetworkType]: + """Read TensorFlow Lite model.""" + try: + model_path = str(model) if isinstance(model, Path) else model + + with redirect_output( + logger, stdout_level=logging.DEBUG, stderr_level=logging.DEBUG + ): + return read_model(model_path, ModelReaderOptions()) # type: ignore + except (SystemExit, Exception) as err: + raise Exception(f"Unable to read model {model_path}") from err + + def _architecture_features(self) -> ArchitectureFeatures: + """Return ArchitectureFeatures instance.""" + return ArchitectureFeatures( + vela_config_files=self.config_files, + accelerator_config=self.accelerator_config, + system_config=self.system_config, + memory_mode=self.memory_mode, + max_blockdep=self.max_block_dependency, + verbose_config=False, + arena_cache_size=self.arena_cache_size, + ) + + def _scheduler_options(self) -> SchedulerOptions: + """Return SchedulerOptions instance.""" + arch = self._architecture_features() + + return SchedulerOptions( + optimization_strategy=self.optimization_strategy, + sram_target=arch.arena_cache_size, + verbose_schedule=False, + ) + + def _compiler_options(self) -> CompilerOptions: + """Return CompilerOptions instance.""" + return CompilerOptions( + verbose_graph=False, + verbose_quantization=False, + verbose_packing=False, + verbose_tensor_purpose=False, + verbose_tensor_format=False, + verbose_allocation=False, + verbose_high_level_command_stream=False, + verbose_register_command_stream=False, + verbose_operators=False, + verbose_weights=False, + show_cpu_operations=False, + tensor_allocator=self.tensor_allocator, + timing=False, + output_dir=self.output_dir, + cpu_tensor_alignment=self.cpu_tensor_alignment, + ) + + +def resolve_compiler_config( + vela_compiler_options: VelaCompilerOptions, +) -> dict[str, Any]: + """Resolve passed compiler options. + + Vela has number of configuration parameters that being + resolved during passing compiler options. E.g. 
Vela + reads configuration parameters from vela.ini and fills + it's internal structures with resolved values (memory mode, + system mode, etc.). + + In order to get this information we need to create + instance of the Vela compiler first. + """ + vela_compiler = VelaCompiler(vela_compiler_options) + return vela_compiler.get_config() + + +def optimize_model( + model_path: Path, compiler_options: VelaCompilerOptions, output_model_path: Path +) -> None: + """Optimize model and return it's path after optimization.""" + logger.debug( + "Optimize model %s for device %s", + model_path, + compiler_options.accelerator_config, + ) + + vela_compiler = VelaCompiler(compiler_options) + optimized_model = vela_compiler.compile_model(model_path) + + logger.debug("Save optimized model into %s", output_model_path) + optimized_model.save(output_model_path) diff --git a/src/mlia/backend/vela/performance.py b/src/mlia/backend/vela/performance.py new file mode 100644 index 0000000..ccd2f6f --- /dev/null +++ b/src/mlia/backend/vela/performance.py @@ -0,0 +1,97 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Vela performance module.""" +from __future__ import annotations + +import logging +from dataclasses import dataclass +from pathlib import Path + +import numpy as np +from ethosu.vela.npu_performance import PassCycles +from ethosu.vela.tensor import MemArea + +from mlia.backend.vela.compiler import OptimizedModel +from mlia.backend.vela.compiler import VelaCompiler +from mlia.backend.vela.compiler import VelaCompilerOptions + + +logger = logging.getLogger(__name__) + + +@dataclass +class PerformanceMetrics: # pylint: disable=too-many-instance-attributes + """Contains all the performance metrics Vela generates in a run.""" + + npu_cycles: int + sram_access_cycles: int + dram_access_cycles: int + on_chip_flash_access_cycles: int + off_chip_flash_access_cycles: int + total_cycles: int + batch_inference_time: float + inferences_per_second: float + batch_size: int + unknown_memory_area_size: int + sram_memory_area_size: int + dram_memory_area_size: int + on_chip_flash_memory_area_size: int + off_chip_flash_memory_area_size: int + + +def estimate_performance( + model_path: Path, compiler_options: VelaCompilerOptions +) -> PerformanceMetrics: + """Return performance estimations for the model/device. 
+ + Logic for this function comes from Vela module stats_writer.py + """ + logger.debug( + "Estimate performance for the model %s on %s", + model_path, + compiler_options.accelerator_config, + ) + + vela_compiler = VelaCompiler(compiler_options) + + initial_model = vela_compiler.read_model(model_path) + if initial_model.optimized: + raise Exception("Unable to estimate performance for the given optimized model") + + optimized_model = vela_compiler.compile_model(initial_model) + + return _performance_metrics(optimized_model) + + +def _performance_metrics(optimized_model: OptimizedModel) -> PerformanceMetrics: + """Return performance metrics for optimized model.""" + cycles = optimized_model.nng.cycles + + def memory_usage(mem_area: MemArea) -> int: + """Get memory usage for the proviced memory area type.""" + memory_used: dict[MemArea, int] = optimized_model.nng.memory_used + bandwidths = optimized_model.nng.bandwidths + + return memory_used.get(mem_area, 0) if np.sum(bandwidths[mem_area]) > 0 else 0 + + midpoint_fps = np.nan + midpoint_inference_time = cycles[PassCycles.Total] / optimized_model.arch.core_clock + if midpoint_inference_time > 0: + midpoint_fps = 1 / midpoint_inference_time + + return PerformanceMetrics( + npu_cycles=int(cycles[PassCycles.Npu]), + sram_access_cycles=int(cycles[PassCycles.SramAccess]), + dram_access_cycles=int(cycles[PassCycles.DramAccess]), + on_chip_flash_access_cycles=int(cycles[PassCycles.OnChipFlashAccess]), + off_chip_flash_access_cycles=int(cycles[PassCycles.OffChipFlashAccess]), + total_cycles=int(cycles[PassCycles.Total]), + batch_inference_time=midpoint_inference_time * 1000, + inferences_per_second=midpoint_fps, + batch_size=optimized_model.nng.batch_size, + unknown_memory_area_size=memory_usage(MemArea.Unknown), + sram_memory_area_size=memory_usage(MemArea.Sram), + dram_memory_area_size=memory_usage(MemArea.Dram), + on_chip_flash_memory_area_size=memory_usage(MemArea.OnChipFlash), + 
off_chip_flash_memory_area_size=memory_usage(MemArea.OffChipFlash), + ) diff --git a/src/mlia/cli/config.py b/src/mlia/cli/config.py index 6ea9bb4..2d694dc 100644 --- a/src/mlia/cli/config.py +++ b/src/mlia/cli/config.py @@ -6,18 +6,19 @@ from __future__ import annotations import logging from functools import lru_cache -import mlia.backend.manager as backend_manager -from mlia.tools.metadata.common import DefaultInstallationManager -from mlia.tools.metadata.common import InstallationManager -from mlia.tools.metadata.corstone import get_corstone_installations -from mlia.tools.metadata.py_package import get_pypackage_backend_installations +from mlia.backend.corstone.install import get_corstone_installations +from mlia.backend.install import supported_backends +from mlia.backend.manager import DefaultInstallationManager +from mlia.backend.manager import InstallationManager +from mlia.backend.tosa_checker.install import get_tosa_backend_installation logger = logging.getLogger(__name__) def get_installation_manager(noninteractive: bool = False) -> InstallationManager: """Return installation manager.""" - backends = get_corstone_installations() + get_pypackage_backend_installations() + backends = get_corstone_installations() + backends.append(get_tosa_backend_installation()) return DefaultInstallationManager(backends, noninteractive=noninteractive) @@ -31,7 +32,7 @@ def get_available_backends() -> list[str]: manager = get_installation_manager() available_backends.extend( backend - for backend in backend_manager.supported_backends() + for backend in supported_backends() if manager.backend_installed(backend) ) diff --git a/src/mlia/devices/ethosu/config.py b/src/mlia/devices/ethosu/config.py index e44dcdc..f2e867e 100644 --- a/src/mlia/devices/ethosu/config.py +++ b/src/mlia/devices/ethosu/config.py @@ -6,9 +6,9 @@ from __future__ import annotations import logging from typing import Any +from mlia.backend.vela.compiler import resolve_compiler_config +from 
mlia.backend.vela.compiler import VelaCompilerOptions from mlia.devices.config import IPConfiguration -from mlia.tools.vela_wrapper import resolve_compiler_config -from mlia.tools.vela_wrapper import VelaCompilerOptions from mlia.utils.filesystem import get_profile from mlia.utils.filesystem import get_vela_config diff --git a/src/mlia/devices/ethosu/data_analysis.py b/src/mlia/devices/ethosu/data_analysis.py index 70b6f65..db89a5f 100644 --- a/src/mlia/devices/ethosu/data_analysis.py +++ b/src/mlia/devices/ethosu/data_analysis.py @@ -6,12 +6,12 @@ from __future__ import annotations from dataclasses import dataclass from functools import singledispatchmethod +from mlia.backend.vela.compat import Operators from mlia.core.common import DataItem from mlia.core.data_analysis import Fact from mlia.core.data_analysis import FactExtractor from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics from mlia.nn.tensorflow.optimizations.select import OptimizationSettings -from mlia.tools.vela_wrapper import Operators @dataclass diff --git a/src/mlia/devices/ethosu/data_collection.py b/src/mlia/devices/ethosu/data_collection.py index c8d5293..d68eadb 100644 --- a/src/mlia/devices/ethosu/data_collection.py +++ b/src/mlia/devices/ethosu/data_collection.py @@ -6,6 +6,8 @@ from __future__ import annotations import logging from pathlib import Path +from mlia.backend.vela.compat import Operators +from mlia.backend.vela.compat import supported_operators from mlia.core.context import Context from mlia.core.data_collection import ContextAwareDataCollector from mlia.core.errors import FunctionalityNotSupportedError @@ -20,8 +22,6 @@ from mlia.nn.tensorflow.config import KerasModel from mlia.nn.tensorflow.optimizations.select import get_optimizer from mlia.nn.tensorflow.optimizations.select import OptimizationSettings from mlia.nn.tensorflow.utils import save_keras_model -from mlia.tools.vela_wrapper import Operators -from mlia.tools.vela_wrapper import 
supported_operators from mlia.utils.logging import log_action from mlia.utils.types import is_list_of diff --git a/src/mlia/devices/ethosu/handlers.py b/src/mlia/devices/ethosu/handlers.py index 48f9a2e..f010bdb 100644 --- a/src/mlia/devices/ethosu/handlers.py +++ b/src/mlia/devices/ethosu/handlers.py @@ -5,6 +5,7 @@ from __future__ import annotations import logging +from mlia.backend.vela.compat import Operators from mlia.core.events import CollectedDataEvent from mlia.core.handlers import WorkflowEventsHandler from mlia.core.typing import PathOrFileLike @@ -13,7 +14,6 @@ from mlia.devices.ethosu.events import EthosUAdvisorStartedEvent from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics from mlia.devices.ethosu.performance import PerformanceMetrics from mlia.devices.ethosu.reporters import ethos_u_formatters -from mlia.tools.vela_wrapper import Operators logger = logging.getLogger(__name__) diff --git a/src/mlia/devices/ethosu/operators.py b/src/mlia/devices/ethosu/operators.py index 1a4ce8d..97c2b17 100644 --- a/src/mlia/devices/ethosu/operators.py +++ b/src/mlia/devices/ethosu/operators.py @@ -3,7 +3,7 @@ """Operators module.""" import logging -from mlia.tools import vela_wrapper +from mlia.backend.vela.compat import generate_supported_operators_report logger = logging.getLogger(__name__) @@ -11,4 +11,4 @@ logger = logging.getLogger(__name__) def report() -> None: """Generate supported operators report.""" - vela_wrapper.generate_supported_operators_report() + generate_supported_operators_report() diff --git a/src/mlia/devices/ethosu/performance.py b/src/mlia/devices/ethosu/performance.py index 431dd89..8051d6e 100644 --- a/src/mlia/devices/ethosu/performance.py +++ b/src/mlia/devices/ethosu/performance.py @@ -9,8 +9,13 @@ from enum import Enum from pathlib import Path from typing import Union -import mlia.backend.manager as backend_manager -import mlia.tools.vela_wrapper as vela +import mlia.backend.vela.compiler as vela_comp +import 
mlia.backend.vela.performance as vela_perf +from mlia.backend.corstone.performance import DeviceInfo +from mlia.backend.corstone.performance import estimate_performance +from mlia.backend.corstone.performance import ModelInfo +from mlia.backend.install import is_supported +from mlia.backend.install import supported_backends from mlia.core.context import Context from mlia.core.performance import PerformanceEstimator from mlia.devices.ethosu.config import EthosUConfiguration @@ -133,7 +138,7 @@ class VelaPerformanceEstimator( else model ) - vela_perf_metrics = vela.estimate_performance( + vela_perf_metrics = vela_perf.estimate_performance( model_path, self.device.compiler_options ) @@ -177,17 +182,17 @@ class CorstonePerformanceEstimator( f"{model_path.stem}_vela.tflite" ) - vela.optimize_model( + vela_comp.optimize_model( model_path, self.device.compiler_options, optimized_model_path ) - model_info = backend_manager.ModelInfo(model_path=optimized_model_path) - device_info = backend_manager.DeviceInfo( + model_info = ModelInfo(model_path=optimized_model_path) + device_info = DeviceInfo( device_type=self.device.target, # type: ignore mac=self.device.mac, ) - corstone_perf_metrics = backend_manager.estimate_performance( + corstone_perf_metrics = estimate_performance( model_info, device_info, self.backend ) @@ -218,10 +223,10 @@ class EthosUPerformanceEstimator( if backends is None: backends = ["Vela"] # Only Vela is always available as default for backend in backends: - if backend != "Vela" and not backend_manager.is_supported(backend): + if backend != "Vela" and not is_supported(backend): raise ValueError( f"Unsupported backend '{backend}'. " - f"Only 'Vela' and {backend_manager.supported_backends()} " + f"Only 'Vela' and {supported_backends()} " "are supported." 
) self.backends = set(backends) @@ -241,7 +246,7 @@ class EthosUPerformanceEstimator( if backend == "Vela": vela_estimator = VelaPerformanceEstimator(self.context, self.device) memory_usage = vela_estimator.estimate(tflite_model) - elif backend in backend_manager.supported_backends(): + elif backend in supported_backends(): corstone_estimator = CorstonePerformanceEstimator( self.context, self.device, backend ) diff --git a/src/mlia/devices/ethosu/reporters.py b/src/mlia/devices/ethosu/reporters.py index f0fcb39..7ecaab1 100644 --- a/src/mlia/devices/ethosu/reporters.py +++ b/src/mlia/devices/ethosu/reporters.py @@ -7,6 +7,8 @@ from collections import defaultdict from typing import Any from typing import Callable +from mlia.backend.vela.compat import Operator +from mlia.backend.vela.compat import Operators from mlia.core.advice_generation import Advice from mlia.core.reporters import report_advice from mlia.core.reporting import BytesCell @@ -23,8 +25,6 @@ from mlia.core.reporting import SingleRow from mlia.core.reporting import Table from mlia.devices.ethosu.config import EthosUConfiguration from mlia.devices.ethosu.performance import PerformanceMetrics -from mlia.tools.vela_wrapper import Operator -from mlia.tools.vela_wrapper import Operators from mlia.utils.console import style_improvement from mlia.utils.types import is_list_of diff --git a/src/mlia/tools/__init__.py b/src/mlia/tools/__init__.py deleted file mode 100644 index 184e966..0000000 --- a/src/mlia/tools/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Tools module.""" diff --git a/src/mlia/tools/metadata/__init__.py b/src/mlia/tools/metadata/__init__.py deleted file mode 100644 index f877e4f..0000000 --- a/src/mlia/tools/metadata/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Module for the tools metadata.""" diff --git a/src/mlia/tools/metadata/common.py b/src/mlia/tools/metadata/common.py deleted file mode 100644 index 5019da9..0000000 --- a/src/mlia/tools/metadata/common.py +++ /dev/null @@ -1,322 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Module for installation process.""" -from __future__ import annotations - -import logging -from abc import ABC -from abc import abstractmethod -from dataclasses import dataclass -from pathlib import Path -from typing import Callable -from typing import Union - -from mlia.core.errors import ConfigurationError -from mlia.core.errors import InternalError -from mlia.utils.misc import yes - -logger = logging.getLogger(__name__) - - -@dataclass -class InstallFromPath: - """Installation from the local path.""" - - backend_path: Path - - -@dataclass -class DownloadAndInstall: - """Download and install.""" - - eula_agreement: bool = True - - -InstallationType = Union[InstallFromPath, DownloadAndInstall] - - -class Installation(ABC): - """Base class for the installation process of the backends.""" - - @property - @abstractmethod - def name(self) -> str: - """Return name of the backend.""" - - @property - @abstractmethod - def description(self) -> str: - """Return description of the backend.""" - - @property - @abstractmethod - def could_be_installed(self) -> bool: - """Return true if backend could be installed in current environment.""" - - @property - @abstractmethod - def already_installed(self) -> bool: - """Return true if backend is already installed.""" - - @abstractmethod - def supports(self, install_type: InstallationType) -> bool: - """Return true if installation supports requested installation type.""" - - @abstractmethod - def install(self, install_type: InstallationType) -> None: - """Install the backend.""" - - @abstractmethod - def uninstall(self) -> None: - 
"""Uninstall the backend.""" - - -InstallationFilter = Callable[[Installation], bool] - - -class AlreadyInstalledFilter: - """Filter for already installed backends.""" - - def __call__(self, installation: Installation) -> bool: - """Installation filter.""" - return installation.already_installed - - -class ReadyForInstallationFilter: - """Filter for ready to be installed backends.""" - - def __call__(self, installation: Installation) -> bool: - """Installation filter.""" - return installation.could_be_installed and not installation.already_installed - - -class SupportsInstallTypeFilter: - """Filter backends that support certain type of the installation.""" - - def __init__(self, installation_type: InstallationType) -> None: - """Init filter.""" - self.installation_type = installation_type - - def __call__(self, installation: Installation) -> bool: - """Installation filter.""" - return installation.supports(self.installation_type) - - -class SearchByNameFilter: - """Filter installation by name.""" - - def __init__(self, backend_name: str | None) -> None: - """Init filter.""" - self.backend_name = backend_name - - def __call__(self, installation: Installation) -> bool: - """Installation filter.""" - return ( - not self.backend_name - or installation.name.casefold() == self.backend_name.casefold() - ) - - -class InstallationManager(ABC): - """Helper class for managing installations.""" - - @abstractmethod - def install_from(self, backend_path: Path, backend_name: str, force: bool) -> None: - """Install backend from the local directory.""" - - @abstractmethod - def download_and_install( - self, backend_name: str, eula_agreement: bool, force: bool - ) -> None: - """Download and install backends.""" - - @abstractmethod - def show_env_details(self) -> None: - """Show environment details.""" - - @abstractmethod - def backend_installed(self, backend_name: str) -> bool: - """Return true if requested backend installed.""" - - @abstractmethod - def uninstall(self, 
backend_name: str) -> None: - """Delete the existing installation.""" - - -class InstallationFiltersMixin: - """Mixin for filtering installation based on different conditions.""" - - installations: list[Installation] - - def filter_by(self, *filters: InstallationFilter) -> list[Installation]: - """Filter installations.""" - return [ - installation - for installation in self.installations - if all(filter_(installation) for filter_ in filters) - ] - - def find_by_name(self, backend_name: str) -> list[Installation]: - """Return list of the backends filtered by name.""" - return self.filter_by(SearchByNameFilter(backend_name)) - - def already_installed(self, backend_name: str = None) -> list[Installation]: - """Return list of backends that are already installed.""" - return self.filter_by( - AlreadyInstalledFilter(), - SearchByNameFilter(backend_name), - ) - - def ready_for_installation(self) -> list[Installation]: - """Return list of the backends that could be installed.""" - return self.filter_by(ReadyForInstallationFilter()) - - -class DefaultInstallationManager(InstallationManager, InstallationFiltersMixin): - """Interactive installation manager.""" - - def __init__( - self, installations: list[Installation], noninteractive: bool = False - ) -> None: - """Init the manager.""" - self.installations = installations - self.noninteractive = noninteractive - - def _install( - self, - backend_name: str, - install_type: InstallationType, - prompt: Callable[[Installation], str], - force: bool, - ) -> None: - """Check metadata and install backend.""" - installs = self.find_by_name(backend_name) - - if not installs: - logger.info("Unknown backend '%s'.", backend_name) - logger.info( - "Please run command 'mlia-backend list' to get list of " - "supported backend names." 
- ) - - return - - if len(installs) > 1: - raise InternalError(f"More than one backend with name {backend_name} found") - - installation = installs[0] - if not installation.supports(install_type): - if isinstance(install_type, InstallFromPath): - logger.info( - "Backend '%s' could not be installed using path '%s'.", - installation.name, - install_type.backend_path, - ) - logger.info( - "Please check that '%s' is a valid path to the installed backend.", - install_type.backend_path, - ) - else: - logger.info( - "Backend '%s' could not be downloaded and installed", - installation.name, - ) - logger.info( - "Please refer to the project's documentation for more details." - ) - - return - - if installation.already_installed and not force: - logger.info("Backend '%s' is already installed.", installation.name) - logger.info("Please, consider using --force option.") - return - - proceed = self.noninteractive or yes(prompt(installation)) - if not proceed: - logger.info("%s installation canceled.", installation.name) - return - - if installation.already_installed and force: - logger.info( - "Force installing %s, so delete the existing " - "installed backend first.", - installation.name, - ) - installation.uninstall() - - installation.install(install_type) - logger.info("%s successfully installed.", installation.name) - - def install_from( - self, backend_path: Path, backend_name: str, force: bool = False - ) -> None: - """Install from the provided directory.""" - - def prompt(install: Installation) -> str: - return ( - f"{install.name} was found in {backend_path}. " - "Would you like to install it?" 
- ) - - install_type = InstallFromPath(backend_path) - self._install(backend_name, install_type, prompt, force) - - def download_and_install( - self, backend_name: str, eula_agreement: bool = True, force: bool = False - ) -> None: - """Download and install available backends.""" - - def prompt(install: Installation) -> str: - return f"Would you like to download and install {install.name}?" - - install_type = DownloadAndInstall(eula_agreement=eula_agreement) - self._install(backend_name, install_type, prompt, force) - - def show_env_details(self) -> None: - """Print current state of the execution environment.""" - if installed := self.already_installed(): - self._print_installation_list("Installed backends:", installed) - - if could_be_installed := self.ready_for_installation(): - self._print_installation_list( - "Following backends could be installed:", - could_be_installed, - new_section=bool(installed), - ) - - if not installed and not could_be_installed: - logger.info("No backends installed") - - @staticmethod - def _print_installation_list( - header: str, installations: list[Installation], new_section: bool = False - ) -> None: - """Print list of the installations.""" - logger.info("%s%s\n", "\n" if new_section else "", header) - - for installation in installations: - logger.info(" - %s", installation.name) - - def uninstall(self, backend_name: str) -> None: - """Uninstall the backend with name backend_name.""" - installations = self.already_installed(backend_name) - - if not installations: - raise ConfigurationError(f"Backend '{backend_name}' is not installed") - - if len(installations) != 1: - raise InternalError( - f"More than one installed backend with name {backend_name} found" - ) - - installation = installations[0] - installation.uninstall() - - logger.info("%s successfully uninstalled.", installation.name) - - def backend_installed(self, backend_name: str) -> bool: - """Return true if requested backend installed.""" - installations = 
self.already_installed(backend_name) - - return len(installations) == 1 diff --git a/src/mlia/tools/metadata/corstone.py b/src/mlia/tools/metadata/corstone.py deleted file mode 100644 index df2dcdb..0000000 --- a/src/mlia/tools/metadata/corstone.py +++ /dev/null @@ -1,417 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Module for Corstone based FVPs. - -The import of subprocess module raises a B404 bandit error. MLIA usage of -subprocess is needed and can be considered safe hence disabling the security -check. -""" -from __future__ import annotations - -import logging -import platform -import subprocess # nosec -import tarfile -from dataclasses import dataclass -from pathlib import Path -from typing import Callable -from typing import Iterable -from typing import Optional - -import mlia.backend.manager as backend_manager -from mlia.backend.system import remove_system -from mlia.tools.metadata.common import DownloadAndInstall -from mlia.tools.metadata.common import Installation -from mlia.tools.metadata.common import InstallationType -from mlia.tools.metadata.common import InstallFromPath -from mlia.utils.download import DownloadArtifact -from mlia.utils.filesystem import all_files_exist -from mlia.utils.filesystem import all_paths_valid -from mlia.utils.filesystem import copy_all -from mlia.utils.filesystem import get_mlia_resources -from mlia.utils.filesystem import temp_directory -from mlia.utils.filesystem import working_directory - - -logger = logging.getLogger(__name__) - - -@dataclass -class BackendInfo: - """Backend information.""" - - backend_path: Path - copy_source: bool = True - system_config: str | None = None - - -PathChecker = Callable[[Path], Optional[BackendInfo]] -BackendInstaller = Callable[[bool, Path], Path] - - -class BackendMetadata: - """Backend installation metadata.""" - - def __init__( - self, - name: str, - description: str, - system_config: str, - 
apps_resources: list[str], - fvp_dir_name: str, - download_artifact: DownloadArtifact | None, - supported_platforms: list[str] | None = None, - ) -> None: - """ - Initialize BackendMetadata. - - Members expected_systems and expected_apps are filled automatically. - """ - self.name = name - self.description = description - self.system_config = system_config - self.apps_resources = apps_resources - self.fvp_dir_name = fvp_dir_name - self.download_artifact = download_artifact - self.supported_platforms = supported_platforms - - self.expected_systems = backend_manager.get_all_system_names(name) - self.expected_apps = backend_manager.get_all_application_names(name) - - @property - def expected_resources(self) -> Iterable[Path]: - """Return list of expected resources.""" - resources = [self.system_config, *self.apps_resources] - - return (get_mlia_resources() / resource for resource in resources) - - @property - def supported_platform(self) -> bool: - """Return true if current platform supported.""" - if not self.supported_platforms: - return True - - return platform.system() in self.supported_platforms - - -class BackendInstallation(Installation): - """Backend installation.""" - - def __init__( - self, - backend_runner: backend_manager.BackendRunner, - metadata: BackendMetadata, - path_checker: PathChecker, - backend_installer: BackendInstaller | None, - ) -> None: - """Init the backend installation.""" - self.backend_runner = backend_runner - self.metadata = metadata - self.path_checker = path_checker - self.backend_installer = backend_installer - - @property - def name(self) -> str: - """Return name of the backend.""" - return self.metadata.name - - @property - def description(self) -> str: - """Return description of the backend.""" - return self.metadata.description - - @property - def already_installed(self) -> bool: - """Return true if backend already installed.""" - return self.backend_runner.all_installed( - self.metadata.expected_systems, 
self.metadata.expected_apps - ) - - @property - def could_be_installed(self) -> bool: - """Return true if backend could be installed.""" - if not self.metadata.supported_platform: - return False - - return all_paths_valid(self.metadata.expected_resources) - - def supports(self, install_type: InstallationType) -> bool: - """Return true if backends supported type of the installation.""" - if isinstance(install_type, DownloadAndInstall): - return self.metadata.download_artifact is not None - - if isinstance(install_type, InstallFromPath): - return self.path_checker(install_type.backend_path) is not None - - return False # type: ignore - - def install(self, install_type: InstallationType) -> None: - """Install the backend.""" - if isinstance(install_type, DownloadAndInstall): - download_artifact = self.metadata.download_artifact - assert download_artifact is not None, "No artifact provided" - - self.download_and_install(download_artifact, install_type.eula_agreement) - elif isinstance(install_type, InstallFromPath): - backend_path = self.path_checker(install_type.backend_path) - assert backend_path is not None, "Unable to resolve backend path" - - self.install_from(backend_path) - else: - raise Exception(f"Unable to install {install_type}") - - def install_from(self, backend_info: BackendInfo) -> None: - """Install backend from the directory.""" - mlia_resources = get_mlia_resources() - - with temp_directory() as tmpdir: - fvp_dist_dir = tmpdir / self.metadata.fvp_dir_name - - system_config = self.metadata.system_config - if backend_info.system_config: - system_config = backend_info.system_config - - resources_to_copy = [mlia_resources / system_config] - if backend_info.copy_source: - resources_to_copy.append(backend_info.backend_path) - - copy_all(*resources_to_copy, dest=fvp_dist_dir) - - self.backend_runner.install_system(fvp_dist_dir) - - for app in self.metadata.apps_resources: - self.backend_runner.install_application(mlia_resources / app) - - def 
download_and_install( - self, download_artifact: DownloadArtifact, eula_agrement: bool - ) -> None: - """Download and install the backend.""" - with temp_directory() as tmpdir: - try: - downloaded_to = download_artifact.download_to(tmpdir) - except Exception as err: - raise Exception("Unable to download backend artifact") from err - - with working_directory(tmpdir / "dist", create_dir=True) as dist_dir: - with tarfile.open(downloaded_to) as archive: - archive.extractall(dist_dir) - - assert self.backend_installer, ( - f"Backend '{self.metadata.name}' does not support " - "download and installation." - ) - backend_path = self.backend_installer(eula_agrement, dist_dir) - if self.path_checker(backend_path) is None: - raise Exception("Downloaded artifact has invalid structure") - - self.install(InstallFromPath(backend_path)) - - def uninstall(self) -> None: - """Uninstall the backend.""" - remove_system(self.metadata.fvp_dir_name) - - -class PackagePathChecker: - """Package path checker.""" - - def __init__( - self, expected_files: list[str], backend_subfolder: str | None = None - ) -> None: - """Init the path checker.""" - self.expected_files = expected_files - self.backend_subfolder = backend_subfolder - - def __call__(self, backend_path: Path) -> BackendInfo | None: - """Check if directory contains all expected files.""" - resolved_paths = (backend_path / file for file in self.expected_files) - if not all_files_exist(resolved_paths): - return None - - if self.backend_subfolder: - subfolder = backend_path / self.backend_subfolder - - if not subfolder.is_dir(): - return None - - return BackendInfo(subfolder) - - return BackendInfo(backend_path) - - -class StaticPathChecker: - """Static path checker.""" - - def __init__( - self, - static_backend_path: Path, - expected_files: list[str], - copy_source: bool = False, - system_config: str | None = None, - ) -> None: - """Init static path checker.""" - self.static_backend_path = static_backend_path - self.expected_files = 
expected_files - self.copy_source = copy_source - self.system_config = system_config - - def __call__(self, backend_path: Path) -> BackendInfo | None: - """Check if directory equals static backend path with all expected files.""" - if backend_path != self.static_backend_path: - return None - - resolved_paths = (backend_path / file for file in self.expected_files) - if not all_files_exist(resolved_paths): - return None - - return BackendInfo( - backend_path, - copy_source=self.copy_source, - system_config=self.system_config, - ) - - -class CompoundPathChecker: - """Compound path checker.""" - - def __init__(self, *path_checkers: PathChecker) -> None: - """Init compound path checker.""" - self.path_checkers = path_checkers - - def __call__(self, backend_path: Path) -> BackendInfo | None: - """Iterate over checkers and return first non empty backend info.""" - first_resolved_backend_info = ( - backend_info - for path_checker in self.path_checkers - if (backend_info := path_checker(backend_path)) is not None - ) - - return next(first_resolved_backend_info, None) - - -class Corstone300Installer: - """Helper class that wraps Corstone 300 installation logic.""" - - def __call__(self, eula_agreement: bool, dist_dir: Path) -> Path: - """Install Corstone-300 and return path to the models.""" - with working_directory(dist_dir): - install_dir = "corstone-300" - try: - fvp_install_cmd = [ - "./FVP_Corstone_SSE-300.sh", - "-q", - "-d", - install_dir, - ] - if not eula_agreement: - fvp_install_cmd += [ - "--nointeractive", - "--i-agree-to-the-contained-eula", - ] - - # The following line raises a B603 error for bandit. 
In this - # specific case, the input is pretty much static and cannot be - # changed byt the user hence disabling the security check for - # this instance - subprocess.check_call(fvp_install_cmd) # nosec - except subprocess.CalledProcessError as err: - raise Exception( - "Error occurred during Corstone-300 installation" - ) from err - - return dist_dir / install_dir - - -def get_corstone_300_installation() -> Installation: - """Get Corstone-300 installation.""" - corstone_300 = BackendInstallation( - backend_runner=backend_manager.BackendRunner(), - # pylint: disable=line-too-long - metadata=BackendMetadata( - name="Corstone-300", - description="Corstone-300 FVP", - system_config="backend_configs/systems/corstone-300/backend-config.json", - apps_resources=[], - fvp_dir_name="corstone_300", - download_artifact=DownloadArtifact( - name="Corstone-300 FVP", - url="https://developer.arm.com/-/media/Arm%20Developer%20Community/Downloads/OSS/FVP/Corstone-300/FVP_Corstone_SSE-300_11.16_26.tgz", - filename="FVP_Corstone_SSE-300_11.16_26.tgz", - version="11.16_26", - sha256_hash="e26139be756b5003a30d978c629de638aed1934d597dc24a17043d4708e934d7", - ), - supported_platforms=["Linux"], - ), - # pylint: enable=line-too-long - path_checker=CompoundPathChecker( - PackagePathChecker( - expected_files=[ - "models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U55", - "models/Linux64_GCC-6.4/FVP_Corstone_SSE-300_Ethos-U65", - ], - backend_subfolder="models/Linux64_GCC-6.4", - ), - StaticPathChecker( - static_backend_path=Path("/opt/VHT"), - expected_files=[ - "VHT_Corstone_SSE-300_Ethos-U55", - "VHT_Corstone_SSE-300_Ethos-U65", - ], - copy_source=False, - system_config=( - "backend_configs/systems/corstone-300-vht/backend-config.json" - ), - ), - ), - backend_installer=Corstone300Installer(), - ) - - return corstone_300 - - -def get_corstone_310_installation() -> Installation: - """Get Corstone-310 installation.""" - corstone_310 = BackendInstallation( - 
backend_runner=backend_manager.BackendRunner(), - # pylint: disable=line-too-long - metadata=BackendMetadata( - name="Corstone-310", - description="Corstone-310 FVP", - system_config="backend_configs/systems/corstone-310/backend-config.json", - apps_resources=[], - fvp_dir_name="corstone_310", - download_artifact=None, - supported_platforms=["Linux"], - ), - # pylint: enable=line-too-long - path_checker=CompoundPathChecker( - PackagePathChecker( - expected_files=[ - "models/Linux64_GCC-9.3/FVP_Corstone_SSE-310", - "models/Linux64_GCC-9.3/FVP_Corstone_SSE-310_Ethos-U65", - ], - backend_subfolder="models/Linux64_GCC-9.3", - ), - StaticPathChecker( - static_backend_path=Path("/opt/VHT"), - expected_files=[ - "VHT_Corstone_SSE-310", - "VHT_Corstone_SSE-310_Ethos-U65", - ], - copy_source=False, - system_config=( - "backend_configs/systems/corstone-310-vht/backend-config.json" - ), - ), - ), - backend_installer=None, - ) - - return corstone_310 - - -def get_corstone_installations() -> list[Installation]: - """Get Corstone installations.""" - return [ - get_corstone_300_installation(), - get_corstone_310_installation(), - ] diff --git a/src/mlia/tools/metadata/py_package.py b/src/mlia/tools/metadata/py_package.py deleted file mode 100644 index 716b62a..0000000 --- a/src/mlia/tools/metadata/py_package.py +++ /dev/null @@ -1,84 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Module for python package based installations.""" -from __future__ import annotations - -from mlia.tools.metadata.common import DownloadAndInstall -from mlia.tools.metadata.common import Installation -from mlia.tools.metadata.common import InstallationType -from mlia.utils.py_manager import get_package_manager - - -class PyPackageBackendInstallation(Installation): - """Backend based on the python package.""" - - def __init__( - self, - name: str, - description: str, - packages_to_install: list[str], - packages_to_uninstall: list[str], - expected_packages: list[str], - ) -> None: - """Init the backend installation.""" - self._name = name - self._description = description - self._packages_to_install = packages_to_install - self._packages_to_uninstall = packages_to_uninstall - self._expected_packages = expected_packages - - self.package_manager = get_package_manager() - - @property - def name(self) -> str: - """Return name of the backend.""" - return self._name - - @property - def description(self) -> str: - """Return description of the backend.""" - return self._description - - @property - def could_be_installed(self) -> bool: - """Check if backend could be installed.""" - return True - - @property - def already_installed(self) -> bool: - """Check if backend already installed.""" - return self.package_manager.packages_installed(self._expected_packages) - - def supports(self, install_type: InstallationType) -> bool: - """Return true if installation supports requested installation type.""" - return isinstance(install_type, DownloadAndInstall) - - def install(self, install_type: InstallationType) -> None: - """Install the backend.""" - if not self.supports(install_type): - raise Exception(f"Unsupported installation type {install_type}") - - self.package_manager.install(self._packages_to_install) - - def uninstall(self) -> None: - """Uninstall the backend.""" - self.package_manager.uninstall(self._packages_to_uninstall) - - -def 
get_tosa_backend_installation() -> Installation: - """Get TOSA backend installation.""" - return PyPackageBackendInstallation( - name="tosa-checker", - description="Tool to check if a ML model is compatible " - "with the TOSA specification", - packages_to_install=["mlia[tosa]"], - packages_to_uninstall=["tosa-checker"], - expected_packages=["tosa-checker"], - ) - - -def get_pypackage_backend_installations() -> list[Installation]: - """Return list of the backend installations based on python packages.""" - return [ - get_tosa_backend_installation(), - ] diff --git a/src/mlia/tools/vela_wrapper.py b/src/mlia/tools/vela_wrapper.py deleted file mode 100644 index 00d2f2c..0000000 --- a/src/mlia/tools/vela_wrapper.py +++ /dev/null @@ -1,497 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Vela wrapper module.""" -from __future__ import annotations - -import itertools -import logging -import sys -from dataclasses import dataclass -from pathlib import Path -from typing import Any -from typing import Literal - -import numpy as np -from ethosu.vela.architecture_features import ArchitectureFeatures -from ethosu.vela.compiler_driver import compiler_driver -from ethosu.vela.compiler_driver import CompilerOptions -from ethosu.vela.compiler_driver import TensorAllocator -from ethosu.vela.model_reader import ModelReaderOptions -from ethosu.vela.model_reader import read_model -from ethosu.vela.nn_graph import Graph -from ethosu.vela.nn_graph import NetworkType -from ethosu.vela.npu_performance import PassCycles -from ethosu.vela.operation import CustomType -from ethosu.vela.operation import Op -from ethosu.vela.scheduler import OptimizationStrategy -from ethosu.vela.scheduler import SchedulerOptions -from ethosu.vela.tensor import BandwidthDirection -from ethosu.vela.tensor import MemArea -from ethosu.vela.tensor import Tensor -from ethosu.vela.tflite_mapping import optype_to_builtintype -from 
ethosu.vela.tflite_model_semantic import TFLiteSemantic -from ethosu.vela.tflite_supported_operators import TFLiteSupportedOperators -from ethosu.vela.tflite_writer import write_tflite -from ethosu.vela.vela import generate_supported_ops - -from mlia.utils.logging import redirect_output - - -logger = logging.getLogger(__name__) - -VELA_INTERNAL_OPS = (Op.Placeholder, Op.SubgraphInput, Op.Const) - - -@dataclass -class PerformanceMetrics: # pylint: disable=too-many-instance-attributes - """Contains all the performance metrics Vela generates in a run.""" - - npu_cycles: int - sram_access_cycles: int - dram_access_cycles: int - on_chip_flash_access_cycles: int - off_chip_flash_access_cycles: int - total_cycles: int - batch_inference_time: float - inferences_per_second: float - batch_size: int - unknown_memory_area_size: int - sram_memory_area_size: int - dram_memory_area_size: int - on_chip_flash_memory_area_size: int - off_chip_flash_memory_area_size: int - - -@dataclass -class NpuSupported: - """Operator's npu supported attribute.""" - - supported: bool - reasons: list[tuple[str, str]] - - -@dataclass -class Operator: - """Model operator.""" - - name: str - op_type: str - run_on_npu: NpuSupported - - @property - def cpu_only(self) -> bool: - """Return true if operator is CPU only.""" - cpu_only_reasons = [("CPU only operator", "")] - return ( - not self.run_on_npu.supported - and self.run_on_npu.reasons == cpu_only_reasons - ) - - -@dataclass -class Operators: - """Model's operators.""" - - ops: list[Operator] - - @property - def npu_supported_ratio(self) -> float: - """Return NPU supported ratio.""" - total = self.total_number - npu_supported = self.npu_supported_number - - if total == 0 or npu_supported == 0: - return 0 - - return npu_supported / total - - @property - def npu_unsupported_ratio(self) -> float: - """Return NPU unsupported ratio.""" - return 1 - self.npu_supported_ratio - - @property - def total_number(self) -> int: - """Return total number of 
operators.""" - return len(self.ops) - - @property - def npu_supported_number(self) -> int: - """Return number of npu supported operators.""" - return sum(op.run_on_npu.supported for op in self.ops) - - -@dataclass -class Model: - """Model metadata.""" - - nng: Graph - network_type: NetworkType - - @property - def optimized(self) -> bool: - """Return true if model is already optimized.""" - return any( - op.attrs.get("custom_type") == CustomType.ExistingNpuOp - for sg in self.nng.subgraphs - for op in sg.get_all_ops() - ) - - -@dataclass -class OptimizedModel: - """Instance of the Vela optimized model.""" - - nng: Graph - arch: ArchitectureFeatures - compiler_options: CompilerOptions - scheduler_options: SchedulerOptions - - def save(self, output_filename: str | Path) -> None: - """Save instance of the optimized model to the file.""" - write_tflite(self.nng, output_filename) - - -AcceleratorConfigType = Literal[ - "ethos-u55-32", - "ethos-u55-64", - "ethos-u55-128", - "ethos-u55-256", - "ethos-u65-256", - "ethos-u65-512", -] - -TensorAllocatorType = Literal["LinearAlloc", "Greedy", "HillClimb"] - -OptimizationStrategyType = Literal["Performance", "Size"] - - -@dataclass -class VelaCompilerOptions: # pylint: disable=too-many-instance-attributes - """Vela compiler options.""" - - config_files: str | list[str] | None = None - system_config: str = ArchitectureFeatures.DEFAULT_CONFIG - memory_mode: str = ArchitectureFeatures.DEFAULT_CONFIG - accelerator_config: AcceleratorConfigType | None = None - max_block_dependency: int = ArchitectureFeatures.MAX_BLOCKDEP - arena_cache_size: int | None = None - tensor_allocator: TensorAllocatorType = "HillClimb" - cpu_tensor_alignment: int = Tensor.AllocationQuantum - optimization_strategy: OptimizationStrategyType = "Performance" - output_dir: str | None = None - recursion_limit: int = 1000 - - -class VelaCompiler: # pylint: disable=too-many-instance-attributes - """Vela compiler wrapper.""" - - def __init__(self, compiler_options: 
VelaCompilerOptions): - """Init Vela wrapper instance.""" - self.config_files = compiler_options.config_files - self.system_config = compiler_options.system_config - self.memory_mode = compiler_options.memory_mode - self.accelerator_config = compiler_options.accelerator_config - self.max_block_dependency = compiler_options.max_block_dependency - self.arena_cache_size = compiler_options.arena_cache_size - self.tensor_allocator = TensorAllocator[compiler_options.tensor_allocator] - self.cpu_tensor_alignment = compiler_options.cpu_tensor_alignment - self.optimization_strategy = OptimizationStrategy[ - compiler_options.optimization_strategy - ] - self.output_dir = compiler_options.output_dir - self.recursion_limit = compiler_options.recursion_limit - - sys.setrecursionlimit(self.recursion_limit) - - def read_model(self, model: str | Path) -> Model: - """Read model.""" - logger.debug("Read model %s", model) - - nng, network_type = self._read_model(model) - return Model(nng, network_type) - - def compile_model(self, model: str | Path | Model) -> OptimizedModel: - """Compile the model.""" - if isinstance(model, (str, Path)): - nng, network_type = self._read_model(model) - else: - nng, network_type = model.nng, NetworkType.TFLite - - if not nng: - raise Exception("Unable to read model") - - try: - arch = self._architecture_features() - compiler_options = self._compiler_options() - scheduler_options = self._scheduler_options() - - with redirect_output( - logger, stdout_level=logging.DEBUG, stderr_level=logging.DEBUG - ): - compiler_driver( - nng, arch, compiler_options, scheduler_options, network_type - ) - - return OptimizedModel(nng, arch, compiler_options, scheduler_options) - except (SystemExit, Exception) as err: - raise Exception("Model could not be optimized with Vela compiler") from err - - def get_config(self) -> dict[str, Any]: - """Get compiler configuration.""" - arch = self._architecture_features() - - memory_area = { - mem.name: { - "clock_scales": 
arch.memory_clock_scales[mem], - "burst_length": arch.memory_burst_length[mem], - "read_latency": arch.memory_latency[mem][BandwidthDirection.Read], - "write_latency": arch.memory_latency[mem][BandwidthDirection.Write], - } - for mem in ( - MemArea.Sram, - MemArea.Dram, - MemArea.OnChipFlash, - MemArea.OffChipFlash, - ) - } - - return { - "accelerator_config": arch.accelerator_config.value, - "system_config": arch.system_config, - "core_clock": arch.core_clock, - "axi0_port": arch.axi0_port.name, - "axi1_port": arch.axi1_port.name, - "memory_mode": arch.memory_mode, - "const_mem_area": arch.const_mem_area.name, - "arena_mem_area": arch.arena_mem_area.name, - "cache_mem_area": arch.cache_mem_area.name, - "arena_cache_size": arch.arena_cache_size, - "permanent_storage_mem_area": arch.permanent_storage_mem_area.name, - "feature_map_storage_mem_area": arch.feature_map_storage_mem_area.name, - "fast_storage_mem_area": arch.fast_storage_mem_area.name, - "memory_area": memory_area, - } - - @staticmethod - def _read_model(model: str | Path) -> tuple[Graph, NetworkType]: - """Read TensorFlow Lite model.""" - try: - model_path = str(model) if isinstance(model, Path) else model - - with redirect_output( - logger, stdout_level=logging.DEBUG, stderr_level=logging.DEBUG - ): - return read_model(model_path, ModelReaderOptions()) # type: ignore - except (SystemExit, Exception) as err: - raise Exception(f"Unable to read model {model_path}") from err - - def _architecture_features(self) -> ArchitectureFeatures: - """Return ArchitectureFeatures instance.""" - return ArchitectureFeatures( - vela_config_files=self.config_files, - accelerator_config=self.accelerator_config, - system_config=self.system_config, - memory_mode=self.memory_mode, - max_blockdep=self.max_block_dependency, - verbose_config=False, - arena_cache_size=self.arena_cache_size, - ) - - def _scheduler_options(self) -> SchedulerOptions: - """Return SchedulerOptions instance.""" - arch = self._architecture_features() - - 
return SchedulerOptions( - optimization_strategy=self.optimization_strategy, - sram_target=arch.arena_cache_size, - verbose_schedule=False, - ) - - def _compiler_options(self) -> CompilerOptions: - """Return CompilerOptions instance.""" - return CompilerOptions( - verbose_graph=False, - verbose_quantization=False, - verbose_packing=False, - verbose_tensor_purpose=False, - verbose_tensor_format=False, - verbose_allocation=False, - verbose_high_level_command_stream=False, - verbose_register_command_stream=False, - verbose_operators=False, - verbose_weights=False, - show_cpu_operations=False, - tensor_allocator=self.tensor_allocator, - timing=False, - output_dir=self.output_dir, - cpu_tensor_alignment=self.cpu_tensor_alignment, - ) - - -def resolve_compiler_config( - vela_compiler_options: VelaCompilerOptions, -) -> dict[str, Any]: - """Resolve passed compiler options. - - Vela has number of configuration parameters that being - resolved during passing compiler options. E.g. Vela - reads configuration parameters from vela.ini and fills - it's internal structures with resolved values (memory mode, - system mode, etc.). - - In order to get this information we need to create - instance of the Vela compiler first. - """ - vela_compiler = VelaCompiler(vela_compiler_options) - return vela_compiler.get_config() - - -def estimate_performance( - model_path: Path, compiler_options: VelaCompilerOptions -) -> PerformanceMetrics: - """Return performance estimations for the model/device. 
- - Logic for this function comes from Vela module stats_writer.py - """ - logger.debug( - "Estimate performance for the model %s on %s", - model_path, - compiler_options.accelerator_config, - ) - - vela_compiler = VelaCompiler(compiler_options) - - initial_model = vela_compiler.read_model(model_path) - if initial_model.optimized: - raise Exception("Unable to estimate performance for the given optimized model") - - optimized_model = vela_compiler.compile_model(initial_model) - - return _performance_metrics(optimized_model) - - -def optimize_model( - model_path: Path, compiler_options: VelaCompilerOptions, output_model_path: Path -) -> None: - """Optimize model and return it's path after optimization.""" - logger.debug( - "Optimize model %s for device %s", - model_path, - compiler_options.accelerator_config, - ) - - vela_compiler = VelaCompiler(compiler_options) - optimized_model = vela_compiler.compile_model(model_path) - - logger.debug("Save optimized model into %s", output_model_path) - optimized_model.save(output_model_path) - - -def _performance_metrics(optimized_model: OptimizedModel) -> PerformanceMetrics: - """Return performance metrics for optimized model.""" - cycles = optimized_model.nng.cycles - - def memory_usage(mem_area: MemArea) -> int: - """Get memory usage for the proviced memory area type.""" - memory_used: dict[MemArea, int] = optimized_model.nng.memory_used - bandwidths = optimized_model.nng.bandwidths - - return memory_used.get(mem_area, 0) if np.sum(bandwidths[mem_area]) > 0 else 0 - - midpoint_fps = np.nan - midpoint_inference_time = cycles[PassCycles.Total] / optimized_model.arch.core_clock - if midpoint_inference_time > 0: - midpoint_fps = 1 / midpoint_inference_time - - return PerformanceMetrics( - npu_cycles=int(cycles[PassCycles.Npu]), - sram_access_cycles=int(cycles[PassCycles.SramAccess]), - dram_access_cycles=int(cycles[PassCycles.DramAccess]), - on_chip_flash_access_cycles=int(cycles[PassCycles.OnChipFlashAccess]), - 
off_chip_flash_access_cycles=int(cycles[PassCycles.OffChipFlashAccess]), - total_cycles=int(cycles[PassCycles.Total]), - batch_inference_time=midpoint_inference_time * 1000, - inferences_per_second=midpoint_fps, - batch_size=optimized_model.nng.batch_size, - unknown_memory_area_size=memory_usage(MemArea.Unknown), - sram_memory_area_size=memory_usage(MemArea.Sram), - dram_memory_area_size=memory_usage(MemArea.Dram), - on_chip_flash_memory_area_size=memory_usage(MemArea.OnChipFlash), - off_chip_flash_memory_area_size=memory_usage(MemArea.OffChipFlash), - ) - - -def supported_operators( - model_path: Path, compiler_options: VelaCompilerOptions -) -> Operators: - """Return list of model's operators.""" - logger.debug("Check supported operators for the model %s", model_path) - - vela_compiler = VelaCompiler(compiler_options) - initial_model = vela_compiler.read_model(model_path) - - return Operators( - [ - Operator(op.name, optype_to_builtintype(op.type), run_on_npu(op)) - for sg in initial_model.nng.subgraphs - for op in sg.get_all_ops() - if op.type not in VELA_INTERNAL_OPS - ] - ) - - -def run_on_npu(operator: Op) -> NpuSupported: - """Return information if operator can run on NPU. - - Vela does a number of checks that can help establish whether - a particular operator is supported to run on NPU. - - There are two groups of checks: - - general TensorFlow Lite constraints - - operator specific constraints - - If an operator is not supported on NPU then this function - will return the reason of that. 
- - The reason is split in two parts: - - general description of why the operator cannot be placed on NPU - - details on the particular operator - """ - semantic_checker = TFLiteSemantic() - semantic_constraints = itertools.chain( - semantic_checker.generic_constraints, - semantic_checker.specific_constraints[operator.type], - ) - - for constraint in semantic_constraints: - op_valid, op_reason = constraint(operator) - if not op_valid: - return NpuSupported(False, [(constraint.__doc__, op_reason)]) - - if operator.type not in TFLiteSupportedOperators.supported_operators: - reasons = ( - [("CPU only operator", "")] - if operator.type not in VELA_INTERNAL_OPS - else [] - ) - - return NpuSupported(False, reasons) - - tflite_supported_operators = TFLiteSupportedOperators() - operation_constraints = itertools.chain( - tflite_supported_operators.generic_constraints, - tflite_supported_operators.specific_constraints[operator.type], - ) - for constraint in operation_constraints: - op_valid, op_reason = constraint(operator) - if not op_valid: - return NpuSupported(False, [(constraint.__doc__, op_reason)]) - - return NpuSupported(True, []) - - -def generate_supported_operators_report() -> None: - """Generate supported operators report in current working directory.""" - with redirect_output(logger): - generate_supported_ops() diff --git a/tests/conftest.py b/tests/conftest.py index b1f32dc..feb2aa0 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -10,12 +10,12 @@ from typing import Generator import pytest import tensorflow as tf +from mlia.backend.vela.compiler import optimize_model from mlia.core.context import ExecutionContext from mlia.devices.ethosu.config import EthosUConfiguration from mlia.nn.tensorflow.utils import convert_to_tflite from mlia.nn.tensorflow.utils import save_keras_model from mlia.nn.tensorflow.utils import save_tflite_model -from mlia.tools.vela_wrapper import optimize_model @pytest.fixture(scope="session", name="test_resources_path") @@ -68,7 
+68,9 @@ def test_resources(monkeypatch: pytest.MonkeyPatch, test_resources_path: Path) - """Return path to the test resources.""" return test_resources_path / "backends" - monkeypatch.setattr("mlia.backend.fs.get_backend_resources", get_test_resources) + monkeypatch.setattr( + "mlia.backend.executor.fs.get_backend_resources", get_test_resources + ) yield diff --git a/tests/test_api.py b/tests/test_api.py index 6fa15b3..b9ab8ea 100644 --- a/tests/test_api.py +++ b/tests/test_api.py @@ -118,12 +118,12 @@ def test_get_advisor( [ [ "ethos-u55-128", - "mlia.tools.vela_wrapper.generate_supported_operators_report", + "mlia.devices.ethosu.operators.generate_supported_operators_report", None, ], [ "ethos-u65-256", - "mlia.tools.vela_wrapper.generate_supported_operators_report", + "mlia.devices.ethosu.operators.generate_supported_operators_report", None, ], [ diff --git a/tests/test_backend_application.py b/tests/test_backend_application.py deleted file mode 100644 index 478658b..0000000 --- a/tests/test_backend_application.py +++ /dev/null @@ -1,418 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for the application backend.""" -from __future__ import annotations - -from collections import Counter -from contextlib import ExitStack as does_not_raise -from pathlib import Path -from typing import Any -from unittest.mock import MagicMock - -import pytest - -from mlia.backend.application import Application -from mlia.backend.application import get_application -from mlia.backend.application import get_available_application_directory_names -from mlia.backend.application import get_available_applications -from mlia.backend.application import get_unique_application_names -from mlia.backend.application import install_application -from mlia.backend.application import load_applications -from mlia.backend.application import remove_application -from mlia.backend.common import Command -from mlia.backend.common import Param -from mlia.backend.common import UserParamConfig -from mlia.backend.config import ApplicationConfig -from mlia.backend.config import ExtendedApplicationConfig -from mlia.backend.config import NamedExecutionConfig - - -def test_get_available_application_directory_names() -> None: - """Test get_available_applicationss mocking get_resources.""" - directory_names = get_available_application_directory_names() - assert Counter(directory_names) == Counter( - [ - "application1", - "application2", - "application4", - "application5", - "application6", - ] - ) - - -def test_get_available_applications() -> None: - """Test get_available_applicationss mocking get_resources.""" - available_applications = get_available_applications() - - assert all(isinstance(s, Application) for s in available_applications) - assert all(s != 42 for s in available_applications) - assert len(available_applications) == 10 - # application_5 has multiply items with multiply supported systems - assert [str(s) for s in available_applications] == [ - "application_1", - "application_2", - "application_4", - "application_5", - "application_5", - 
"application_5A", - "application_5A", - "application_5B", - "application_5B", - "application_6", - ] - - -def test_get_unique_application_names() -> None: - """Test get_unique_application_names.""" - unique_names = get_unique_application_names() - - assert all(isinstance(s, str) for s in unique_names) - assert all(s for s in unique_names) - assert sorted(unique_names) == [ - "application_1", - "application_2", - "application_4", - "application_5", - "application_5A", - "application_5B", - "application_6", - ] - - -def test_get_application() -> None: - """Test get_application mocking get_resoures.""" - application = get_application("application_1") - if len(application) != 1: - pytest.fail("Unable to get application") - assert application[0].name == "application_1" - - application = get_application("unknown application") - assert len(application) == 0 - - -@pytest.mark.parametrize( - "source, call_count, expected_exception", - ( - ( - "archives/applications/application1.tar.gz", - 0, - pytest.raises( - Exception, match=r"Applications \[application_1\] are already installed" - ), - ), - ( - "various/applications/application_with_empty_config", - 0, - pytest.raises(Exception, match="No application definition found"), - ), - ( - "various/applications/application_with_wrong_config1", - 0, - pytest.raises(Exception, match="Unable to read application definition"), - ), - ( - "various/applications/application_with_wrong_config2", - 0, - pytest.raises(Exception, match="Unable to read application definition"), - ), - ( - "various/applications/application_with_wrong_config3", - 0, - pytest.raises(Exception, match="Unable to read application definition"), - ), - ("various/applications/application_with_valid_config", 1, does_not_raise()), - ( - "archives/applications/application3.tar.gz", - 0, - pytest.raises(Exception, match="Unable to read application definition"), - ), - ( - "backends/applications/application1", - 0, - pytest.raises( - Exception, match=r"Applications 
\[application_1\] are already installed" - ), - ), - ( - "backends/applications/application3", - 0, - pytest.raises(Exception, match="Unable to read application definition"), - ), - ), -) -def test_install_application( - monkeypatch: Any, - test_resources_path: Path, - source: str, - call_count: int, - expected_exception: Any, -) -> None: - """Test application install from archive.""" - mock_create_destination_and_install = MagicMock() - monkeypatch.setattr( - "mlia.backend.application.create_destination_and_install", - mock_create_destination_and_install, - ) - - with expected_exception: - install_application(test_resources_path / source) - assert mock_create_destination_and_install.call_count == call_count - - -def test_remove_application(monkeypatch: Any) -> None: - """Test application removal.""" - mock_remove_backend = MagicMock() - monkeypatch.setattr("mlia.backend.application.remove_backend", mock_remove_backend) - - remove_application("some_application_directory") - mock_remove_backend.assert_called_once() - - -def test_application_config_without_commands() -> None: - """Test application config without commands.""" - config = ApplicationConfig(name="application") - application = Application(config) - # pylint: disable=use-implicit-booleaness-not-comparison - assert application.commands == {} - - -class TestApplication: - """Test for application class methods.""" - - def test___eq__(self) -> None: - """Test overloaded __eq__ method.""" - config = ApplicationConfig( - # Application - supported_systems=["system1", "system2"], - # inherited from Backend - name="name", - description="description", - commands={}, - ) - application1 = Application(config) - application2 = Application(config) # Identical - assert application1 == application2 - - application3 = Application(config) # changed - # Change one single attribute so not equal, but same Type - setattr(application3, "supported_systems", ["somewhere/else"]) - assert application1 != application3 - - # different 
Type - application4 = "Not the Application you are looking for" - assert application1 != application4 - - application5 = Application(config) - # supported systems could be in any order - setattr(application5, "supported_systems", ["system2", "system1"]) - assert application1 == application5 - - def test_can_run_on(self) -> None: - """Test Application can run on.""" - config = ApplicationConfig(name="application", supported_systems=["System-A"]) - - application = Application(config) - assert application.can_run_on("System-A") - assert not application.can_run_on("System-B") - - applications = get_application("application_1", "System 1") - assert len(applications) == 1 - assert applications[0].can_run_on("System 1") - - def test_unable_to_create_application_without_name(self) -> None: - """Test that it is not possible to create application without name.""" - with pytest.raises(Exception, match="Name is empty"): - Application(ApplicationConfig()) - - def test_application_config_without_commands(self) -> None: - """Test application config without commands.""" - config = ApplicationConfig(name="application") - application = Application(config) - # pylint: disable=use-implicit-booleaness-not-comparison - assert application.commands == {} - - @pytest.mark.parametrize( - "config, expected_params", - ( - ( - ApplicationConfig( - name="application", - commands={"command": ["cmd {user_params:0} {user_params:1}"]}, - user_params={ - "command": [ - UserParamConfig( - name="--param1", description="param1", alias="param1" - ), - UserParamConfig( - name="--param2", description="param2", alias="param2" - ), - ] - }, - ), - [Param("--param1", "param1"), Param("--param2", "param2")], - ), - ( - ApplicationConfig( - name="application", - commands={"command": ["cmd {user_params:param1} {user_params:1}"]}, - user_params={ - "command": [ - UserParamConfig( - name="--param1", description="param1", alias="param1" - ), - UserParamConfig( - name="--param2", description="param2", 
alias="param2" - ), - ] - }, - ), - [Param("--param1", "param1"), Param("--param2", "param2")], - ), - ( - ApplicationConfig( - name="application", - commands={"command": ["cmd {user_params:param1}"]}, - user_params={ - "command": [ - UserParamConfig( - name="--param1", description="param1", alias="param1" - ), - UserParamConfig( - name="--param2", description="param2", alias="param2" - ), - ] - }, - ), - [Param("--param1", "param1")], - ), - ), - ) - def test_remove_unused_params( - self, config: ApplicationConfig, expected_params: list[Param] - ) -> None: - """Test mod remove_unused_parameter.""" - application = Application(config) - application.remove_unused_params() - assert application.commands["command"].params == expected_params - - -@pytest.mark.parametrize( - "config, expected_error", - ( - ( - ExtendedApplicationConfig(name="application"), - pytest.raises(Exception, match="No supported systems definition provided"), - ), - ( - ExtendedApplicationConfig( - name="application", supported_systems=[NamedExecutionConfig(name="")] - ), - pytest.raises( - Exception, - match="Unable to read supported system definition, name is missed", - ), - ), - ( - ExtendedApplicationConfig( - name="application", - supported_systems=[ - NamedExecutionConfig( - name="system", - commands={"command": ["cmd"]}, - user_params={"command": [UserParamConfig(name="param")]}, - ) - ], - commands={"command": ["cmd {user_params:0}"]}, - user_params={"command": [UserParamConfig(name="param")]}, - ), - pytest.raises( - Exception, match="Default parameters for command .* should have aliases" - ), - ), - ( - ExtendedApplicationConfig( - name="application", - supported_systems=[ - NamedExecutionConfig( - name="system", - commands={"command": ["cmd"]}, - user_params={"command": [UserParamConfig(name="param")]}, - ) - ], - commands={"command": ["cmd {user_params:0}"]}, - user_params={"command": [UserParamConfig(name="param", alias="param")]}, - ), - pytest.raises( - Exception, match="system 
parameters for command .* should have aliases" - ), - ), - ), -) -def test_load_application_exceptional_cases( - config: ExtendedApplicationConfig, expected_error: Any -) -> None: - """Test exceptional cases for application load function.""" - with expected_error: - load_applications(config) - - -def test_load_application() -> None: - """Test application load function. - - The main purpose of this test is to test configuration for application - for different systems. All configuration should be correctly - overridden if needed. - """ - application_5 = get_application("application_5") - assert len(application_5) == 2 - - default_commands = { - "build": Command(["default build command"]), - "run": Command(["default run command"]), - } - default_variables = {"var1": "value1", "var2": "value2"} - - application_5_0 = application_5[0] - assert application_5_0.supported_systems == ["System 1"] - assert application_5_0.commands == default_commands - assert application_5_0.variables == default_variables - - application_5_1 = application_5[1] - assert application_5_1.supported_systems == ["System 2"] - assert application_5_1.commands == application_5_1.commands - assert application_5_1.variables == default_variables - - application_5a = get_application("application_5A") - assert len(application_5a) == 2 - - application_5a_0 = application_5a[0] - assert application_5a_0.supported_systems == ["System 1"] - assert application_5a_0.commands == default_commands - assert application_5a_0.variables == {"var1": "new value1", "var2": "value2"} - - application_5a_1 = application_5a[1] - assert application_5a_1.supported_systems == ["System 2"] - assert application_5a_1.commands == { - "build": default_commands["build"], - "run": Command(["run command on system 2"]), - } - assert application_5a_1.variables == {"var1": "value1", "var2": "new value2"} - - application_5b = get_application("application_5B") - assert len(application_5b) == 2 - - application_5b_0 = application_5b[0] - assert 
application_5b_0.supported_systems == ["System 1"] - assert application_5b_0.commands == { - "build": Command(["default build command with value for var1 System1"]), - "run": Command(["default run command with value for var2 System1"]), - } - assert "non_used_command" not in application_5b_0.commands - - application_5b_1 = application_5b[1] - assert application_5b_1.supported_systems == ["System 2"] - assert application_5b_1.commands == { - "build": Command(["default build command with value for var1 System2"]), - "run": Command(["run command on system 2"], []), - } diff --git a/tests/test_backend_common.py b/tests/test_backend_common.py deleted file mode 100644 index 4f4853e..0000000 --- a/tests/test_backend_common.py +++ /dev/null @@ -1,480 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -# pylint: disable=protected-access -"""Tests for the common backend module.""" -from __future__ import annotations - -from contextlib import ExitStack as does_not_raise -from pathlib import Path -from typing import Any -from typing import cast -from typing import IO -from typing import List -from unittest.mock import MagicMock - -import pytest - -from mlia.backend.application import Application -from mlia.backend.common import Backend -from mlia.backend.common import BaseBackendConfig -from mlia.backend.common import Command -from mlia.backend.common import ConfigurationException -from mlia.backend.common import load_config -from mlia.backend.common import Param -from mlia.backend.common import parse_raw_parameter -from mlia.backend.common import remove_backend -from mlia.backend.config import ApplicationConfig -from mlia.backend.config import UserParamConfig -from mlia.backend.execution import ExecutionContext -from mlia.backend.execution import ParamResolver -from mlia.backend.system import System - - -@pytest.mark.parametrize( - "directory_name, expected_exception", - ( - ("some_dir", 
does_not_raise()), - (None, pytest.raises(Exception, match="No directory name provided")), - ), -) -def test_remove_backend( - monkeypatch: Any, directory_name: str, expected_exception: Any -) -> None: - """Test remove_backend function.""" - mock_remove_resource = MagicMock() - monkeypatch.setattr("mlia.backend.common.remove_resource", mock_remove_resource) - - with expected_exception: - remove_backend(directory_name, "applications") - - -@pytest.mark.parametrize( - "filename, expected_exception", - ( - ("application_config.json", does_not_raise()), - (None, pytest.raises(Exception, match="Unable to read config")), - ), -) -def test_load_config( - filename: str, expected_exception: Any, test_resources_path: Path, monkeypatch: Any -) -> None: - """Test load_config.""" - with expected_exception: - configs: list[Path | IO[bytes] | None] = ( - [None] - if not filename - else [ - # Ignore pylint warning as 'with' can't be used inside of a - # generator expression. - # pylint: disable=consider-using-with - open(test_resources_path / filename, "rb"), - test_resources_path / filename, - ] - ) - for config in configs: - json_mock = MagicMock() - monkeypatch.setattr("mlia.backend.common.json.load", json_mock) - load_config(config) - json_mock.assert_called_once() - - -class TestBackend: - """Test Backend class.""" - - def test___repr__(self) -> None: - """Test the representation of Backend instance.""" - backend = Backend( - BaseBackendConfig(name="Testing name", description="Testing description") - ) - assert str(backend) == "Testing name" - - def test__eq__(self) -> None: - """Test equality method with different cases.""" - backend1 = Backend(BaseBackendConfig(name="name", description="description")) - backend1.commands = {"command": Command(["command"])} - - backend2 = Backend(BaseBackendConfig(name="name", description="description")) - backend2.commands = {"command": Command(["command"])} - - backend3 = Backend( - BaseBackendConfig( - name="Ben", description="This is not 
the Backend you are looking for" - ) - ) - backend3.commands = {"wave": Command(["wave hand"])} - - backend4 = "Foo" # checking not isinstance(backend4, Backend) - - assert backend1 == backend2 - assert backend1 != backend3 - assert backend1 != backend4 - - @pytest.mark.parametrize( - "parameter, valid", - [ - ("--choice-param value_1", True), - ("--choice-param wrong_value", False), - ("--open-param something", True), - ("--wrong-param value", False), - ], - ) - def test_validate_parameter( - self, parameter: str, valid: bool, test_resources_path: Path - ) -> None: - """Test validate_parameter.""" - config = cast( - List[ApplicationConfig], - load_config(test_resources_path / "hello_world.json"), - ) - # The application configuration is a list of configurations so we need - # only the first one - # Exercise the validate_parameter test using the Application classe which - # inherits from Backend. - application = Application(config[0]) - assert application.validate_parameter("run", parameter) == valid - - def test_validate_parameter_with_invalid_command( - self, test_resources_path: Path - ) -> None: - """Test validate_parameter with an invalid command_name.""" - config = cast( - List[ApplicationConfig], - load_config(test_resources_path / "hello_world.json"), - ) - application = Application(config[0]) - with pytest.raises(AttributeError) as err: - # command foo does not exist, so raise an error - application.validate_parameter("foo", "bar") - assert "Unknown command: 'foo'" in str(err.value) - - def test_build_command(self) -> None: - """Test command building.""" - config = { - "name": "test", - "commands": { - "build": ["build {user_params:0} {user_params:1}"], - "run": ["run {user_params:0}"], - "post_run": ["post_run {application_params:0} on {system_params:0}"], - "some_command": ["Command with {variables:var_A}"], - "empty_command": [""], - }, - "user_params": { - "build": [ - { - "name": "choice_param_0=", - "values": [1, 2, 3], - "default_value": 1, - }, - 
{"name": "choice_param_1", "values": [3, 4, 5], "default_value": 3}, - {"name": "choice_param_3", "values": [6, 7, 8]}, - ], - "run": [{"name": "flag_param_0"}], - }, - "variables": {"var_A": "value for variable A"}, - } - - application, system = Application(config), System(config) # type: ignore - context = ExecutionContext( - app=application, - app_params=[], - system=system, - system_params=[], - ) - - param_resolver = ParamResolver(context) - - cmd = application.build_command( - "build", ["choice_param_0=2", "choice_param_1=4"], param_resolver - ) - assert cmd == ["build choice_param_0=2 choice_param_1 4"] - - cmd = application.build_command("build", ["choice_param_0=2"], param_resolver) - assert cmd == ["build choice_param_0=2 choice_param_1 3"] - - cmd = application.build_command( - "build", ["choice_param_0=2", "choice_param_3=7"], param_resolver - ) - assert cmd == ["build choice_param_0=2 choice_param_1 3"] - - with pytest.raises( - ConfigurationException, match="Command 'foo' could not be found." 
- ): - application.build_command("foo", [""], param_resolver) - - cmd = application.build_command("some_command", [], param_resolver) - assert cmd == ["Command with value for variable A"] - - cmd = application.build_command("empty_command", [], param_resolver) - assert cmd == [""] - - @pytest.mark.parametrize("class_", [Application, System]) - def test_build_command_unknown_variable(self, class_: type) -> None: - """Test that unable to construct backend with unknown variable.""" - with pytest.raises(Exception, match="Unknown variable var1"): - config = {"name": "test", "commands": {"run": ["run {variables:var1}"]}} - class_(config) - - @pytest.mark.parametrize( - "class_, config, expected_output", - [ - ( - Application, - { - "name": "test", - "commands": { - "build": ["build {user_params:0} {user_params:1}"], - "run": ["run {user_params:0}"], - }, - "user_params": { - "build": [ - { - "name": "choice_param_0=", - "values": ["a", "b", "c"], - "default_value": "a", - "alias": "param_1", - }, - { - "name": "choice_param_1", - "values": ["a", "b", "c"], - "default_value": "a", - "alias": "param_2", - }, - {"name": "choice_param_3", "values": ["a", "b", "c"]}, - ], - "run": [{"name": "flag_param_0"}], - }, - }, - [ - ( - "b", - Param( - name="choice_param_0=", - description="", - values=["a", "b", "c"], - default_value="a", - alias="param_1", - ), - ), - ( - "a", - Param( - name="choice_param_1", - description="", - values=["a", "b", "c"], - default_value="a", - alias="param_2", - ), - ), - ( - "c", - Param( - name="choice_param_3", - description="", - values=["a", "b", "c"], - ), - ), - ], - ), - (System, {"name": "test"}, []), - ], - ) - def test_resolved_parameters( - self, - class_: type, - config: dict, - expected_output: list[tuple[str | None, Param]], - ) -> None: - """Test command building.""" - backend = class_(config) - - params = backend.resolved_parameters( - "build", ["choice_param_0=b", "choice_param_3=c"] - ) - assert params == expected_output - - 
@pytest.mark.parametrize( - ["param_name", "user_param", "expected_value"], - [ - ( - "test_name", - "test_name=1234", - "1234", - ), # optional parameter using '=' - ( - "test_name", - "test_name 1234", - "1234", - ), # optional parameter using ' ' - ("test_name", "test_name", None), # flag - (None, "test_name=1234", "1234"), # positional parameter - ], - ) - def test_resolved_user_parameters( - self, param_name: str, user_param: str, expected_value: str - ) -> None: - """Test different variants to provide user parameters.""" - # A sample config providing one backend config - config = { - "name": "test_backend", - "commands": { - "test": ["user_param:test_param"], - }, - "user_params": { - "test": [UserParamConfig(name=param_name, alias="test_name")], - }, - } - backend = Backend(cast(BaseBackendConfig, config)) - params = backend.resolved_parameters( - command_name="test", user_params=[user_param] - ) - assert len(params) == 1 - value, param = params[0] - assert param_name == param.name - assert expected_value == value - - @pytest.mark.parametrize( - "input_param,expected", - [ - ("--param=1", ("--param", "1")), - ("--param 1", ("--param", "1")), - ("--flag", ("--flag", None)), - ], - ) - def test__parse_raw_parameter( - self, input_param: str, expected: tuple[str, str | None] - ) -> None: - """Test internal method of parsing a single raw parameter.""" - assert parse_raw_parameter(input_param) == expected - - -class TestParam: - """Test Param class.""" - - def test__eq__(self) -> None: - """Test equality method with different cases.""" - param1 = Param(name="test", description="desc", values=["values"]) - param2 = Param(name="test", description="desc", values=["values"]) - param3 = Param(name="test1", description="desc", values=["values"]) - param4 = object() - - assert param1 == param2 - assert param1 != param3 - assert param1 != param4 - - def test_get_details(self) -> None: - """Test get_details() method.""" - param1 = Param(name="test", description="desc", 
values=["values"]) - assert param1.get_details() == { - "name": "test", - "values": ["values"], - "description": "desc", - } - - def test_invalid(self) -> None: - """Test invalid use cases for the Param class.""" - with pytest.raises( - ConfigurationException, - match="Either name, alias or both must be set to identify a parameter.", - ): - Param(name=None, description="desc", values=["values"]) - - -class TestCommand: - """Test Command class.""" - - def test_get_details(self) -> None: - """Test get_details() method.""" - param1 = Param(name="test", description="desc", values=["values"]) - command1 = Command(command_strings=["echo test"], params=[param1]) - assert command1.get_details() == { - "command_strings": ["echo test"], - "user_params": [ - {"name": "test", "values": ["values"], "description": "desc"} - ], - } - - def test__eq__(self) -> None: - """Test equality method with different cases.""" - param1 = Param("test", "desc", ["values"]) - param2 = Param("test1", "desc1", ["values1"]) - command1 = Command(command_strings=["echo test"], params=[param1]) - command2 = Command(command_strings=["echo test"], params=[param1]) - command3 = Command(command_strings=["echo test"]) - command4 = Command(command_strings=["echo test"], params=[param2]) - command5 = object() - - assert command1 == command2 - assert command1 != command3 - assert command1 != command4 - assert command1 != command5 - - @pytest.mark.parametrize( - "params, expected_error", - [ - [[], does_not_raise()], - [[Param("param", "param description", [])], does_not_raise()], - [ - [ - Param("param", "param description", [], None, "alias"), - Param("param", "param description", [], None), - ], - does_not_raise(), - ], - [ - [ - Param("param1", "param1 description", [], None, "alias1"), - Param("param2", "param2 description", [], None, "alias2"), - ], - does_not_raise(), - ], - [ - [ - Param("param", "param description", [], None, "alias"), - Param("param", "param description", [], None, "alias"), - ], - 
pytest.raises(ConfigurationException, match="Non-unique aliases alias"), - ], - [ - [ - Param("alias", "param description", [], None, "alias1"), - Param("param", "param description", [], None, "alias"), - ], - pytest.raises( - ConfigurationException, - match="Aliases .* could not be used as parameter name", - ), - ], - [ - [ - Param("alias", "param description", [], None, "alias"), - Param("param1", "param1 description", [], None, "alias1"), - ], - does_not_raise(), - ], - [ - [ - Param("alias", "param description", [], None, "alias"), - Param("alias", "param1 description", [], None, "alias1"), - ], - pytest.raises( - ConfigurationException, - match="Aliases .* could not be used as parameter name", - ), - ], - [ - [ - Param("param1", "param1 description", [], None, "alias1"), - Param("param2", "param2 description", [], None, "alias1"), - Param("param3", "param3 description", [], None, "alias2"), - Param("param4", "param4 description", [], None, "alias2"), - ], - pytest.raises( - ConfigurationException, match="Non-unique aliases alias1, alias2" - ), - ], - ], - ) - def test_validate_params(self, params: list[Param], expected_error: Any) -> None: - """Test command validation function.""" - with expected_error: - Command([], params) diff --git a/tests/test_backend_corstone_install.py b/tests/test_backend_corstone_install.py new file mode 100644 index 0000000..3b05a49 --- /dev/null +++ b/tests/test_backend_corstone_install.py @@ -0,0 +1,490 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for Corstone related installation functions..""" +from __future__ import annotations + +import tarfile +from pathlib import Path +from typing import Iterable +from unittest.mock import MagicMock + +import pytest + +from mlia.backend.corstone.install import Corstone300Installer +from mlia.backend.corstone.install import get_corstone_300_installation +from mlia.backend.corstone.install import get_corstone_310_installation +from mlia.backend.corstone.install import get_corstone_installations +from mlia.backend.corstone.install import PackagePathChecker +from mlia.backend.corstone.install import StaticPathChecker +from mlia.backend.executor.runner import BackendRunner +from mlia.backend.install import BackendInfo +from mlia.backend.install import BackendInstallation +from mlia.backend.install import BackendInstaller +from mlia.backend.install import BackendMetadata +from mlia.backend.install import CompoundPathChecker +from mlia.backend.install import DownloadAndInstall +from mlia.backend.install import InstallFromPath +from mlia.backend.install import PathChecker + + +@pytest.fixture(name="test_mlia_resources") +def fixture_test_mlia_resources( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch +) -> Path: + """Redirect MLIA resources resolution to the temp directory.""" + mlia_resources = tmp_path / "resources" + mlia_resources.mkdir() + + monkeypatch.setattr( + "mlia.backend.install.get_mlia_resources", + MagicMock(return_value=mlia_resources), + ) + + return mlia_resources + + +def get_backend_installation( # pylint: disable=too-many-arguments + backend_runner_mock: MagicMock = MagicMock(), + name: str = "test_name", + description: str = "test_description", + download_artifact: MagicMock | None = None, + path_checker: PathChecker = MagicMock(), + apps_resources: list[str] | None = None, + system_config: str | None = None, + backend_installer: BackendInstaller = MagicMock(), + supported_platforms: list[str] | None = 
None, +) -> BackendInstallation: + """Get backend installation.""" + return BackendInstallation( + backend_runner=backend_runner_mock, + metadata=BackendMetadata( + name=name, + description=description, + system_config=system_config or "", + apps_resources=apps_resources or [], + fvp_dir_name="sample_dir", + download_artifact=download_artifact, + supported_platforms=supported_platforms, + ), + path_checker=path_checker, + backend_installer=backend_installer, + ) + + +@pytest.mark.parametrize( + "platform, supported_platforms, expected_result", + [ + ["Linux", ["Linux"], True], + ["Linux", [], True], + ["Linux", None, True], + ["Windows", ["Linux"], False], + ], +) +def test_could_be_installed_depends_on_platform( + platform: str, + supported_platforms: list[str] | None, + expected_result: bool, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Test that installation could not be installed on unsupported platform.""" + monkeypatch.setattr( + "mlia.backend.install.platform.system", + MagicMock(return_value=platform), + ) + monkeypatch.setattr( + "mlia.backend.install.all_paths_valid", + MagicMock(return_value=True), + ) + backend_runner_mock = MagicMock(spec=BackendRunner) + + installation = get_backend_installation( + backend_runner_mock, + supported_platforms=supported_platforms, + ) + assert installation.could_be_installed == expected_result + + +def test_get_corstone_installations() -> None: + """Test function get_corstone_installation.""" + installs = get_corstone_installations() + assert len(installs) == 2 + assert all(isinstance(install, BackendInstallation) for install in installs) + + +def test_backend_installation_metadata_resolving() -> None: + """Test backend installation metadata resolving.""" + backend_runner_mock = MagicMock(spec=BackendRunner) + installation = get_backend_installation(backend_runner_mock) + + assert installation.name == "test_name" + assert installation.description == "test_description" + + 
backend_runner_mock.all_installed.return_value = False + assert installation.already_installed is False + + assert installation.could_be_installed is True + + +def test_backend_installation_supported_install_types(tmp_path: Path) -> None: + """Test supported installation types.""" + installation_no_download_artifact = get_backend_installation() + assert installation_no_download_artifact.supports(DownloadAndInstall()) is False + + installation_with_download_artifact = get_backend_installation( + download_artifact=MagicMock() + ) + assert installation_with_download_artifact.supports(DownloadAndInstall()) is True + + path_checker_mock = MagicMock(return_value=BackendInfo(tmp_path)) + installation_can_install_from_dir = get_backend_installation( + path_checker=path_checker_mock + ) + assert installation_can_install_from_dir.supports(InstallFromPath(tmp_path)) is True + + any_installation = get_backend_installation() + assert any_installation.supports("unknown_install_type") is False # type: ignore + + +def test_backend_installation_install_wrong_type() -> None: + """Test that operation should fail if wrong install type provided.""" + with pytest.raises(Exception, match="Unable to install wrong_install_type"): + backend_runner_mock = MagicMock(spec=BackendRunner) + installation = get_backend_installation(backend_runner_mock) + + installation.install("wrong_install_type") # type: ignore + + +def test_backend_installation_install_from_path( + tmp_path: Path, test_mlia_resources: Path +) -> None: + """Test installation from the path.""" + system_config = test_mlia_resources / "example_config.json" + system_config.touch() + + sample_app = test_mlia_resources / "sample_app" + sample_app.mkdir() + + dist_dir = tmp_path / "dist" + dist_dir.mkdir() + + path_checker_mock = MagicMock(return_value=BackendInfo(dist_dir)) + + backend_runner_mock = MagicMock(spec=BackendRunner) + installation = get_backend_installation( + backend_runner_mock=backend_runner_mock, + 
path_checker=path_checker_mock, + apps_resources=[sample_app.name], + system_config="example_config.json", + ) + + assert installation.supports(InstallFromPath(dist_dir)) is True + installation.install(InstallFromPath(dist_dir)) + + backend_runner_mock.install_system.assert_called_once() + backend_runner_mock.install_application.assert_called_once_with(sample_app) + + +@pytest.mark.parametrize("copy_source", [True, False]) +def test_backend_installation_install_from_static_path( + tmp_path: Path, test_mlia_resources: Path, copy_source: bool +) -> None: + """Test installation from the predefined path.""" + system_config = test_mlia_resources / "example_config.json" + system_config.touch() + + custom_system_config = test_mlia_resources / "custom_config.json" + custom_system_config.touch() + + sample_app = test_mlia_resources / "sample_app" + sample_app.mkdir() + + predefined_location = tmp_path / "backend" + predefined_location.mkdir() + + predefined_location_file = predefined_location / "file.txt" + predefined_location_file.touch() + + predefined_location_dir = predefined_location / "folder" + predefined_location_dir.mkdir() + nested_file = predefined_location_dir / "nested_file.txt" + nested_file.touch() + + backend_runner_mock = MagicMock(spec=BackendRunner) + + def check_install_dir(install_dir: Path) -> None: + """Check content of the install dir.""" + assert install_dir.is_dir() + files = list(install_dir.iterdir()) + + if copy_source: + assert len(files) == 3 + assert all(install_dir / item in files for item in ["file.txt", "folder"]) + assert (install_dir / "folder/nested_file.txt").is_file() + else: + assert len(files) == 1 + + assert install_dir / "custom_config.json" in files + + backend_runner_mock.install_system.side_effect = check_install_dir + + installation = get_backend_installation( + backend_runner_mock=backend_runner_mock, + path_checker=StaticPathChecker( + predefined_location, + ["file.txt"], + copy_source=copy_source, + 
system_config=str(custom_system_config), + ), + apps_resources=[sample_app.name], + system_config="example_config.json", + ) + + assert installation.supports(InstallFromPath(predefined_location)) is True + installation.install(InstallFromPath(predefined_location)) + + backend_runner_mock.install_system.assert_called_once() + backend_runner_mock.install_application.assert_called_once_with(sample_app) + + +def create_sample_fvp_archive(tmp_path: Path) -> Path: + """Create sample FVP tar archive.""" + fvp_archive_dir = tmp_path / "archive" + fvp_archive_dir.mkdir() + + sample_file = fvp_archive_dir / "sample.txt" + sample_file.write_text("Sample file") + + sample_dir = fvp_archive_dir / "sample_dir" + sample_dir.mkdir() + + fvp_archive = tmp_path / "archive.tgz" + with tarfile.open(fvp_archive, "w:gz") as fvp_archive_tar: + fvp_archive_tar.add(fvp_archive_dir, arcname=fvp_archive_dir.name) + + return fvp_archive + + +def test_backend_installation_download_and_install( + test_mlia_resources: Path, tmp_path: Path +) -> None: + """Test downloading and installation process.""" + fvp_archive = create_sample_fvp_archive(tmp_path) + + system_config = test_mlia_resources / "example_config.json" + system_config.touch() + + download_artifact_mock = MagicMock() + download_artifact_mock.download_to.return_value = fvp_archive + + path_checker = PackagePathChecker(["archive/sample.txt"], "archive/sample_dir") + + def installer(_eula_agreement: bool, dist_dir: Path) -> Path: + """Sample installer.""" + return dist_dir + + backend_runner_mock = MagicMock(spec=BackendRunner) + installation = get_backend_installation( + backend_runner_mock, + download_artifact=download_artifact_mock, + backend_installer=installer, + path_checker=path_checker, + system_config="example_config.json", + ) + + installation.install(DownloadAndInstall()) + + backend_runner_mock.install_system.assert_called_once() + + +@pytest.mark.parametrize( + "dir_content, expected_result", + [ + [ + ["models/", 
"file1.txt", "file2.txt"], + "models", + ], + [ + ["file1.txt", "file2.txt"], + None, + ], + [ + ["models/", "file2.txt"], + None, + ], + ], +) +def test_corstone_path_checker_valid_path( + tmp_path: Path, dir_content: list[str], expected_result: str | None +) -> None: + """Test Corstone path checker valid scenario.""" + path_checker = PackagePathChecker(["file1.txt", "file2.txt"], "models") + + for item in dir_content: + if item.endswith("/"): + item_dir = tmp_path / item + item_dir.mkdir() + else: + item_file = tmp_path / item + item_file.touch() + + result = path_checker(tmp_path) + expected = ( + None if expected_result is None else BackendInfo(tmp_path / expected_result) + ) + + assert result == expected + + +@pytest.mark.parametrize("system_config", [None, "system_config"]) +@pytest.mark.parametrize("copy_source", [True, False]) +def test_static_path_checker( + tmp_path: Path, copy_source: bool, system_config: str | None +) -> None: + """Test static path checker.""" + static_checker = StaticPathChecker( + tmp_path, [], copy_source=copy_source, system_config=system_config + ) + assert static_checker(tmp_path) == BackendInfo( + tmp_path, copy_source=copy_source, system_config=system_config + ) + + +def test_static_path_checker_not_valid_path(tmp_path: Path) -> None: + """Test static path checker should return None if path is not valid.""" + static_checker = StaticPathChecker(tmp_path, ["file.txt"]) + assert static_checker(tmp_path / "backend") is None + + +def test_static_path_checker_not_valid_structure(tmp_path: Path) -> None: + """Test static path checker should return None if files are missing.""" + static_checker = StaticPathChecker(tmp_path, ["file.txt"]) + assert static_checker(tmp_path) is None + + missing_file = tmp_path / "file.txt" + missing_file.touch() + + assert static_checker(tmp_path) == BackendInfo(tmp_path, copy_source=False) + + +def test_compound_path_checker(tmp_path: Path) -> None: + """Test compound path checker.""" + 
path_checker_path_valid_path = MagicMock(return_value=BackendInfo(tmp_path)) + path_checker_path_not_valid_path = MagicMock(return_value=None) + + checker = CompoundPathChecker( + path_checker_path_valid_path, path_checker_path_not_valid_path + ) + assert checker(tmp_path) == BackendInfo(tmp_path) + + checker = CompoundPathChecker(path_checker_path_not_valid_path) + assert checker(tmp_path) is None + + +@pytest.mark.parametrize( + "eula_agreement, expected_command", + [ + [ + True, + [ + "./FVP_Corstone_SSE-300.sh", + "-q", + "-d", + "corstone-300", + ], + ], + [ + False, + [ + "./FVP_Corstone_SSE-300.sh", + "-q", + "-d", + "corstone-300", + "--nointeractive", + "--i-agree-to-the-contained-eula", + ], + ], + ], +) +def test_corstone_300_installer( + tmp_path: Path, + monkeypatch: pytest.MonkeyPatch, + eula_agreement: bool, + expected_command: list[str], +) -> None: + """Test Corstone-300 installer.""" + command_mock = MagicMock() + + monkeypatch.setattr( + "mlia.backend.corstone.install.subprocess.check_call", command_mock + ) + installer = Corstone300Installer() + result = installer(eula_agreement, tmp_path) + + command_mock.assert_called_once_with(expected_command) + assert result == tmp_path / "corstone-300" + + +@pytest.mark.parametrize( + "corstone_installation, expected_paths", + [ + [ + get_corstone_300_installation(), + { + "/opt/VHT/VHT_Corstone_SSE-300_Ethos-U55", + "/opt/VHT/VHT_Corstone_SSE-300_Ethos-U65", + }, + ], + [ + get_corstone_310_installation(), + { + "/opt/VHT/VHT_Corstone_SSE-310", + "/opt/VHT/VHT_Corstone_SSE-310_Ethos-U65", + }, + ], + ], +) +def test_corstone_vht_install( + corstone_installation: BackendInstallation, + expected_paths: set, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Test if Corstone 300/310 could be installed from /opt/VHT.""" + + def _all_files_exist(paths: Iterable[Path]) -> bool: + """Check if all files exist.""" + pathset = {item.as_posix() for item in paths} + return pathset == expected_paths + + 
create_destination_and_install_mock = MagicMock() + + monkeypatch.setattr("mlia.backend.install.all_files_exist", _all_files_exist) + + monkeypatch.setattr( + "mlia.backend.executor.system.get_available_systems", lambda: [] + ) + + monkeypatch.setattr( + "mlia.backend.executor.system.create_destination_and_install", + create_destination_and_install_mock, + ) + + corstone_installation.install(InstallFromPath(Path("/opt/VHT"))) + + create_destination_and_install_mock.assert_called_once() + + +def test_corstone_uninstall( + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Test the uninstall function in Corstone.""" + remove_system_mock = MagicMock() + + monkeypatch.setattr( + "mlia.backend.install.remove_system", + remove_system_mock, + ) + + installation = get_corstone_300_installation() + + installation.uninstall() + remove_system_mock.assert_called_once_with("corstone_300") diff --git a/tests/test_backend_corstone_performance.py b/tests/test_backend_corstone_performance.py new file mode 100644 index 0000000..1734eb9 --- /dev/null +++ b/tests/test_backend_corstone_performance.py @@ -0,0 +1,519 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for module backend/manager.""" +from __future__ import annotations + +import base64 +import json +from contextlib import ExitStack as does_not_raise +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock +from unittest.mock import PropertyMock + +import pytest + +from mlia.backend.corstone.performance import BackendRunner +from mlia.backend.corstone.performance import DeviceInfo +from mlia.backend.corstone.performance import estimate_performance +from mlia.backend.corstone.performance import GenericInferenceOutputParser +from mlia.backend.corstone.performance import GenericInferenceRunnerEthosU +from mlia.backend.corstone.performance import get_generic_runner +from mlia.backend.corstone.performance import ModelInfo +from mlia.backend.corstone.performance import PerformanceMetrics +from mlia.backend.executor.application import get_application +from mlia.backend.executor.execution import ExecutionContext +from mlia.backend.executor.output_consumer import Base64OutputConsumer +from mlia.backend.executor.system import get_system +from mlia.backend.install import get_system_name +from mlia.backend.install import is_supported +from mlia.backend.install import supported_backends + + +def _mock_encode_b64(data: dict[str, int]) -> str: + """ + Encode the given data into a mock base64-encoded string of JSON. + + This reproduces the base64 encoding done in the Corstone applications. 
+ + JSON example: + + ```json + [{'count': 1, + 'profiling_group': 'Inference', + 'samples': [{'name': 'NPU IDLE', 'value': [612]}, + {'name': 'NPU AXI0_RD_DATA_BEAT_RECEIVED', 'value': [165872]}, + {'name': 'NPU AXI0_WR_DATA_BEAT_WRITTEN', 'value': [88712]}, + {'name': 'NPU AXI1_RD_DATA_BEAT_RECEIVED', 'value': [57540]}, + {'name': 'NPU ACTIVE', 'value': [520489]}, + {'name': 'NPU TOTAL', 'value': [521101]}]}] + ``` + """ + wrapped_data = [ + { + "count": 1, + "profiling_group": "Inference", + "samples": [ + {"name": name, "value": [value]} for name, value in data.items() + ], + } + ] + json_str = json.dumps(wrapped_data) + json_bytes = bytearray(json_str, encoding="utf-8") + json_b64 = base64.b64encode(json_bytes).decode("utf-8") + tag = Base64OutputConsumer.TAG_NAME + return f"<{tag}>{json_b64}" + + +@pytest.mark.parametrize( + "data, is_ready, result, missed_keys", + [ + ( + [], + False, + {}, + { + "npu_active_cycles", + "npu_axi0_rd_data_beat_received", + "npu_axi0_wr_data_beat_written", + "npu_axi1_rd_data_beat_received", + "npu_idle_cycles", + "npu_total_cycles", + }, + ), + ( + ["sample text"], + False, + {}, + { + "npu_active_cycles", + "npu_axi0_rd_data_beat_received", + "npu_axi0_wr_data_beat_written", + "npu_axi1_rd_data_beat_received", + "npu_idle_cycles", + "npu_total_cycles", + }, + ), + ( + [_mock_encode_b64({"NPU AXI0_RD_DATA_BEAT_RECEIVED": 123})], + False, + {"npu_axi0_rd_data_beat_received": 123}, + { + "npu_active_cycles", + "npu_axi0_wr_data_beat_written", + "npu_axi1_rd_data_beat_received", + "npu_idle_cycles", + "npu_total_cycles", + }, + ), + ( + [ + _mock_encode_b64( + { + "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1, + "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2, + "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3, + "NPU ACTIVE": 4, + "NPU IDLE": 5, + "NPU TOTAL": 6, + } + ) + ], + True, + { + "npu_axi0_rd_data_beat_received": 1, + "npu_axi0_wr_data_beat_written": 2, + "npu_axi1_rd_data_beat_received": 3, + "npu_active_cycles": 4, + "npu_idle_cycles": 5, + 
"npu_total_cycles": 6, + }, + set(), + ), + ], +) +def test_generic_inference_output_parser( + data: dict[str, int], is_ready: bool, result: dict, missed_keys: set[str] +) -> None: + """Test generic runner output parser.""" + parser = GenericInferenceOutputParser() + + for line in data: + parser.feed(line) + + assert parser.is_ready() == is_ready + assert parser.result == result + assert parser.missed_keys() == missed_keys + + +@pytest.mark.parametrize( + "device, system, application, backend, expected_error", + [ + ( + DeviceInfo(device_type="ethos-u55", mac=32), + ("Corstone-300: Cortex-M55+Ethos-U55", True), + ("Generic Inference Runner: Ethos-U55", True), + "Corstone-300", + does_not_raise(), + ), + ( + DeviceInfo(device_type="ethos-u55", mac=32), + ("Corstone-300: Cortex-M55+Ethos-U55", False), + ("Generic Inference Runner: Ethos-U55", False), + "Corstone-300", + pytest.raises( + Exception, + match=r"System Corstone-300: Cortex-M55\+Ethos-U55 is not installed", + ), + ), + ( + DeviceInfo(device_type="ethos-u55", mac=32), + ("Corstone-300: Cortex-M55+Ethos-U55", True), + ("Generic Inference Runner: Ethos-U55", False), + "Corstone-300", + pytest.raises( + Exception, + match=r"Application Generic Inference Runner: Ethos-U55 " + r"for the system Corstone-300: Cortex-M55\+Ethos-U55 is not installed", + ), + ), + ( + DeviceInfo(device_type="ethos-u55", mac=32), + ("Corstone-310: Cortex-M85+Ethos-U55", True), + ("Generic Inference Runner: Ethos-U55", True), + "Corstone-310", + does_not_raise(), + ), + ( + DeviceInfo(device_type="ethos-u55", mac=32), + ("Corstone-310: Cortex-M85+Ethos-U55", False), + ("Generic Inference Runner: Ethos-U55", False), + "Corstone-310", + pytest.raises( + Exception, + match=r"System Corstone-310: Cortex-M85\+Ethos-U55 is not installed", + ), + ), + ( + DeviceInfo(device_type="ethos-u55", mac=32), + ("Corstone-310: Cortex-M85+Ethos-U55", True), + ("Generic Inference Runner: Ethos-U55", False), + "Corstone-310", + pytest.raises( + Exception, 
+ match=r"Application Generic Inference Runner: Ethos-U55 " + r"for the system Corstone-310: Cortex-M85\+Ethos-U55 is not installed", + ), + ), + ( + DeviceInfo(device_type="ethos-u65", mac=512), + ("Corstone-300: Cortex-M55+Ethos-U65", True), + ("Generic Inference Runner: Ethos-U65", True), + "Corstone-300", + does_not_raise(), + ), + ( + DeviceInfo(device_type="ethos-u65", mac=512), + ("Corstone-300: Cortex-M55+Ethos-U65", False), + ("Generic Inference Runner: Ethos-U65", False), + "Corstone-300", + pytest.raises( + Exception, + match=r"System Corstone-300: Cortex-M55\+Ethos-U65 is not installed", + ), + ), + ( + DeviceInfo(device_type="ethos-u65", mac=512), + ("Corstone-300: Cortex-M55+Ethos-U65", True), + ("Generic Inference Runner: Ethos-U65", False), + "Corstone-300", + pytest.raises( + Exception, + match=r"Application Generic Inference Runner: Ethos-U65 " + r"for the system Corstone-300: Cortex-M55\+Ethos-U65 is not installed", + ), + ), + ( + DeviceInfo(device_type="ethos-u65", mac=512), + ("Corstone-310: Cortex-M85+Ethos-U65", True), + ("Generic Inference Runner: Ethos-U65", True), + "Corstone-310", + does_not_raise(), + ), + ( + DeviceInfo(device_type="ethos-u65", mac=512), + ("Corstone-310: Cortex-M85+Ethos-U65", False), + ("Generic Inference Runner: Ethos-U65", False), + "Corstone-310", + pytest.raises( + Exception, + match=r"System Corstone-310: Cortex-M85\+Ethos-U65 is not installed", + ), + ), + ( + DeviceInfo(device_type="ethos-u65", mac=512), + ("Corstone-310: Cortex-M85+Ethos-U65", True), + ("Generic Inference Runner: Ethos-U65", False), + "Corstone-310", + pytest.raises( + Exception, + match=r"Application Generic Inference Runner: Ethos-U65 " + r"for the system Corstone-310: Cortex-M85\+Ethos-U65 is not installed", + ), + ), + ( + DeviceInfo( + device_type="unknown_device", # type: ignore + mac=None, # type: ignore + ), + ("some_system", False), + ("some_application", False), + "some backend", + pytest.raises(Exception, match="Unsupported device 
unknown_device"), + ), + ], +) +def test_estimate_performance( + device: DeviceInfo, + system: tuple[str, bool], + application: tuple[str, bool], + backend: str, + expected_error: Any, + test_tflite_model: Path, + backend_runner: MagicMock, +) -> None: + """Test getting performance estimations.""" + system_name, system_installed = system + application_name, application_installed = application + + backend_runner.is_system_installed.return_value = system_installed + backend_runner.is_application_installed.return_value = application_installed + + mock_context = create_mock_context( + [ + _mock_encode_b64( + { + "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1, + "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2, + "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3, + "NPU ACTIVE": 4, + "NPU IDLE": 5, + "NPU TOTAL": 6, + } + ) + ] + ) + + backend_runner.run_application.return_value = mock_context + + with expected_error: + perf_metrics = estimate_performance( + ModelInfo(test_tflite_model), device, backend + ) + + assert isinstance(perf_metrics, PerformanceMetrics) + assert perf_metrics == PerformanceMetrics( + npu_axi0_rd_data_beat_received=1, + npu_axi0_wr_data_beat_written=2, + npu_axi1_rd_data_beat_received=3, + npu_active_cycles=4, + npu_idle_cycles=5, + npu_total_cycles=6, + ) + + assert backend_runner.is_system_installed.called_once_with(system_name) + assert backend_runner.is_application_installed.called_once_with( + application_name, system_name + ) + + +@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310")) +def test_estimate_performance_insufficient_data( + backend_runner: MagicMock, test_tflite_model: Path, backend: str +) -> None: + """Test that performance could not be estimated when not all data presented.""" + backend_runner.is_system_installed.return_value = True + backend_runner.is_application_installed.return_value = True + + no_total_cycles_output = { + "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1, + "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2, + "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3, + "NPU 
ACTIVE": 4, + "NPU IDLE": 5, + } + mock_context = create_mock_context([_mock_encode_b64(no_total_cycles_output)]) + + backend_runner.run_application.return_value = mock_context + + with pytest.raises( + Exception, match="Unable to get performance metrics, insufficient data" + ): + device = DeviceInfo(device_type="ethos-u55", mac=32) + estimate_performance(ModelInfo(test_tflite_model), device, backend) + + +def create_mock_process(stdout: list[str], stderr: list[str]) -> MagicMock: + """Mock underlying process.""" + mock_process = MagicMock() + mock_process.poll.return_value = 0 + type(mock_process).stdout = PropertyMock(return_value=iter(stdout)) + type(mock_process).stderr = PropertyMock(return_value=iter(stderr)) + return mock_process + + +def create_mock_context(stdout: list[str]) -> ExecutionContext: + """Mock ExecutionContext.""" + ctx = ExecutionContext( + app=get_application("application_1")[0], + app_params=[], + system=get_system("System 1"), + system_params=[], + ) + ctx.stdout = bytearray("\n".join(stdout).encode("utf-8")) + return ctx + + +@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310")) +def test_estimate_performance_invalid_output( + test_tflite_model: Path, backend_runner: MagicMock, backend: str +) -> None: + """Test estimation could not be done if inference produces unexpected output.""" + backend_runner.is_system_installed.return_value = True + backend_runner.is_application_installed.return_value = True + + mock_context = create_mock_context(["Something", "is", "wrong"]) + backend_runner.run_application.return_value = mock_context + + with pytest.raises(Exception, match="Unable to get performance metrics"): + estimate_performance( + ModelInfo(test_tflite_model), + DeviceInfo(device_type="ethos-u55", mac=256), + backend=backend, + ) + + +@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310")) +def test_get_generic_runner(backend: str) -> None: + """Test function get_generic_runner().""" + device_info = 
DeviceInfo("ethos-u55", 256) + + runner = get_generic_runner(device_info=device_info, backend=backend) + assert isinstance(runner, GenericInferenceRunnerEthosU) + + with pytest.raises(RuntimeError): + get_generic_runner(device_info=device_info, backend="UNKNOWN_BACKEND") + + +@pytest.mark.parametrize( + ("backend", "device_type"), + ( + ("Corstone-300", "ethos-u55"), + ("Corstone-300", "ethos-u65"), + ("Corstone-310", "ethos-u55"), + ), +) +def test_backend_support(backend: str, device_type: str) -> None: + """Test backend & device support.""" + assert is_supported(backend) + assert is_supported(backend, device_type) + + assert get_system_name(backend, device_type) + + assert backend in supported_backends() + + +class TestGenericInferenceRunnerEthosU: + """Test for the class GenericInferenceRunnerEthosU.""" + + @staticmethod + @pytest.mark.parametrize( + "device, backend, expected_system, expected_app", + [ + [ + DeviceInfo("ethos-u55", 256), + "Corstone-300", + "Corstone-300: Cortex-M55+Ethos-U55", + "Generic Inference Runner: Ethos-U55", + ], + [ + DeviceInfo("ethos-u65", 256), + "Corstone-300", + "Corstone-300: Cortex-M55+Ethos-U65", + "Generic Inference Runner: Ethos-U65", + ], + [ + DeviceInfo("ethos-u55", 256), + "Corstone-310", + "Corstone-310: Cortex-M85+Ethos-U55", + "Generic Inference Runner: Ethos-U55", + ], + [ + DeviceInfo("ethos-u65", 256), + "Corstone-310", + "Corstone-310: Cortex-M85+Ethos-U65", + "Generic Inference Runner: Ethos-U65", + ], + ], + ) + def test_artifact_resolver( + device: DeviceInfo, backend: str, expected_system: str, expected_app: str + ) -> None: + """Test artifact resolving based on the provided parameters.""" + generic_runner = get_generic_runner(device, backend) + assert isinstance(generic_runner, GenericInferenceRunnerEthosU) + + assert generic_runner.system_name == expected_system + assert generic_runner.app_name == expected_app + + @staticmethod + def test_artifact_resolver_unsupported_backend() -> None: + """Test that it 
should be not possible to use unsupported backends.""" + with pytest.raises( + RuntimeError, match="Unsupported device ethos-u65 for backend test_backend" + ): + get_generic_runner(DeviceInfo("ethos-u65", 256), "test_backend") + + @staticmethod + @pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310")) + def test_inference_should_fail_if_system_not_installed( + backend_runner: MagicMock, test_tflite_model: Path, backend: str + ) -> None: + """Test that inference should fail if system is not installed.""" + backend_runner.is_system_installed.return_value = False + + generic_runner = get_generic_runner(DeviceInfo("ethos-u55", 256), backend) + with pytest.raises( + Exception, + match=r"System Corstone-3[01]0: Cortex-M[58]5\+Ethos-U55 is not installed", + ): + generic_runner.run(ModelInfo(test_tflite_model), []) + + @staticmethod + @pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310")) + def test_inference_should_fail_is_apps_not_installed( + backend_runner: MagicMock, test_tflite_model: Path, backend: str + ) -> None: + """Test that inference should fail if apps are not installed.""" + backend_runner.is_system_installed.return_value = True + backend_runner.is_application_installed.return_value = False + + generic_runner = get_generic_runner(DeviceInfo("ethos-u55", 256), backend) + with pytest.raises( + Exception, + match="Application Generic Inference Runner: Ethos-U55" + r" for the system Corstone-3[01]0: Cortex-M[58]5\+Ethos-U55 is not " + r"installed", + ): + generic_runner.run(ModelInfo(test_tflite_model), []) + + +@pytest.fixture(name="backend_runner") +def fixture_backend_runner(monkeypatch: pytest.MonkeyPatch) -> MagicMock: + """Mock backend runner.""" + backend_runner_mock = MagicMock(spec=BackendRunner) + monkeypatch.setattr( + "mlia.backend.corstone.performance.get_backend_runner", + MagicMock(return_value=backend_runner_mock), + ) + return backend_runner_mock diff --git a/tests/test_backend_execution.py 
b/tests/test_backend_execution.py deleted file mode 100644 index e56a1b0..0000000 --- a/tests/test_backend_execution.py +++ /dev/null @@ -1,212 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Test backend execution module.""" -from pathlib import Path -from typing import Any -from unittest.mock import MagicMock - -import pytest - -from mlia.backend.application import Application -from mlia.backend.common import UserParamConfig -from mlia.backend.config import ApplicationConfig -from mlia.backend.config import SystemConfig -from mlia.backend.execution import ExecutionContext -from mlia.backend.execution import get_application_and_system -from mlia.backend.execution import get_application_by_name_and_system -from mlia.backend.execution import ParamResolver -from mlia.backend.execution import run_application -from mlia.backend.system import load_system - - -def test_context_param_resolver(tmpdir: Any) -> None: - """Test parameter resolving.""" - system_config_location = Path(tmpdir) / "system" - system_config_location.mkdir() - - application_config_location = Path(tmpdir) / "application" - application_config_location.mkdir() - - ctx = ExecutionContext( - app=Application( - ApplicationConfig( - name="test_application", - description="Test application", - config_location=application_config_location, - commands={ - "run": [ - "run_command1 {user_params:0}", - "run_command2 {user_params:1}", - ] - }, - variables={"var_1": "value for var_1"}, - user_params={ - "run": [ - UserParamConfig( - name="--param1", - description="Param 1", - default_value="123", - alias="param_1", - ), - UserParamConfig( - name="--param2", description="Param 2", default_value="456" - ), - UserParamConfig( - name="--param3", description="Param 3", alias="param_3" - ), - UserParamConfig( - name="--param4=", - description="Param 4", - default_value="456", - alias="param_4", - ), - UserParamConfig( - description="Param 5", 
- default_value="789", - alias="param_5", - ), - ] - }, - ) - ), - app_params=["--param2=789"], - system=load_system( - SystemConfig( - name="test_system", - description="Test system", - config_location=system_config_location, - commands={ - "build": ["build_command1 {user_params:0}"], - "run": ["run_command {application.commands.run:1}"], - }, - variables={"var_1": "value for var_1"}, - user_params={ - "build": [ - UserParamConfig( - name="--param1", description="Param 1", default_value="aaa" - ), - UserParamConfig(name="--param2", description="Param 2"), - ] - }, - ) - ), - system_params=["--param1=bbb"], - ) - - param_resolver = ParamResolver(ctx) - expected_values = { - "application.name": "test_application", - "application.description": "Test application", - "application.config_dir": str(application_config_location), - "application.commands.run:0": "run_command1 --param1 123", - "application.commands.run.params:0": "123", - "application.commands.run.params:param_1": "123", - "application.commands.run:1": "run_command2 --param2 789", - "application.commands.run.params:1": "789", - "application.variables:var_1": "value for var_1", - "system.name": "test_system", - "system.description": "Test system", - "system.config_dir": str(system_config_location), - "system.commands.build:0": "build_command1 --param1 bbb", - "system.commands.run:0": "run_command run_command2 --param2 789", - "system.commands.build.params:0": "bbb", - "system.variables:var_1": "value for var_1", - } - - for param, value in expected_values.items(): - assert param_resolver(param) == value - - expected_errors = { - "application.variables:var_2": pytest.raises( - Exception, match="Unknown variable var_2" - ), - "application.commands.clean:0": pytest.raises( - Exception, match="Command clean not found" - ), - "application.commands.run:2": pytest.raises( - Exception, match="Invalid index 2 for command run" - ), - "application.commands.run.params:5": pytest.raises( - Exception, match="Invalid 
parameter index 5 for command run" - ), - "application.commands.run.params:param_2": pytest.raises( - Exception, - match="No value for parameter with index or alias param_2 of command run", - ), - "UNKNOWN": pytest.raises( - Exception, match="Unable to resolve parameter UNKNOWN" - ), - "system.commands.build.params:1": pytest.raises( - Exception, - match="No value for parameter with index or alias 1 of command build", - ), - "system.commands.build:A": pytest.raises( - Exception, match="Bad command index A" - ), - "system.variables:var_2": pytest.raises( - Exception, match="Unknown variable var_2" - ), - } - for param, error in expected_errors.items(): - with error: - param_resolver(param) - - resolved_params = ctx.app.resolved_parameters("run", []) - expected_user_params = { - "user_params:0": "--param1 123", - "user_params:param_1": "--param1 123", - "user_params:2": "--param3", - "user_params:param_3": "--param3", - "user_params:3": "--param4=456", - "user_params:param_4": "--param4=456", - "user_params:param_5": "789", - } - for param, expected_value in expected_user_params.items(): - assert param_resolver(param, "run", resolved_params) == expected_value - - with pytest.raises( - Exception, match="Invalid index 5 for user params of command run" - ): - param_resolver("user_params:5", "run", resolved_params) - - with pytest.raises( - Exception, match="No user parameter for command 'run' with alias 'param_2'." 
- ): - param_resolver("user_params:param_2", "run", resolved_params) - - with pytest.raises(Exception, match="Unable to resolve user params"): - param_resolver("user_params:0", "", resolved_params) - - -def test_get_application_by_name_and_system(monkeypatch: Any) -> None: - """Test exceptional case for get_application_by_name_and_system.""" - monkeypatch.setattr( - "mlia.backend.execution.get_application", - MagicMock(return_value=[MagicMock(), MagicMock()]), - ) - - with pytest.raises( - ValueError, - match="Error during getting application test_application for the " - "system test_system", - ): - get_application_by_name_and_system("test_application", "test_system") - - -def test_get_application_and_system(monkeypatch: Any) -> None: - """Test exceptional case for get_application_and_system.""" - monkeypatch.setattr( - "mlia.backend.execution.get_system", MagicMock(return_value=None) - ) - - with pytest.raises(ValueError, match="System test_system is not found"): - get_application_and_system("test_application", "test_system") - - -def test_run_application() -> None: - """Test function run_application.""" - ctx = run_application("application_4", [], "System 4", []) - - assert isinstance(ctx, ExecutionContext) - assert ctx.stderr is not None and not ctx.stderr.decode() - assert ctx.stdout is not None and ctx.stdout.decode().strip() == "application_4" diff --git a/tests/test_backend_executor_application.py b/tests/test_backend_executor_application.py new file mode 100644 index 0000000..8962a0a --- /dev/null +++ b/tests/test_backend_executor_application.py @@ -0,0 +1,422 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for the application backend.""" +from __future__ import annotations + +from collections import Counter +from contextlib import ExitStack as does_not_raise +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock + +import pytest + +from mlia.backend.executor.application import Application +from mlia.backend.executor.application import get_application +from mlia.backend.executor.application import ( + get_available_application_directory_names, +) +from mlia.backend.executor.application import get_available_applications +from mlia.backend.executor.application import get_unique_application_names +from mlia.backend.executor.application import install_application +from mlia.backend.executor.application import load_applications +from mlia.backend.executor.application import remove_application +from mlia.backend.executor.common import Command +from mlia.backend.executor.common import Param +from mlia.backend.executor.common import UserParamConfig +from mlia.backend.executor.config import ApplicationConfig +from mlia.backend.executor.config import ExtendedApplicationConfig +from mlia.backend.executor.config import NamedExecutionConfig + + +def test_get_available_application_directory_names() -> None: + """Test get_available_applicationss mocking get_resources.""" + directory_names = get_available_application_directory_names() + assert Counter(directory_names) == Counter( + [ + "application1", + "application2", + "application4", + "application5", + "application6", + ] + ) + + +def test_get_available_applications() -> None: + """Test get_available_applicationss mocking get_resources.""" + available_applications = get_available_applications() + + assert all(isinstance(s, Application) for s in available_applications) + assert all(s != 42 for s in available_applications) + assert len(available_applications) == 10 + # application_5 has multiply items with multiply supported systems + assert [str(s) 
for s in available_applications] == [ + "application_1", + "application_2", + "application_4", + "application_5", + "application_5", + "application_5A", + "application_5A", + "application_5B", + "application_5B", + "application_6", + ] + + +def test_get_unique_application_names() -> None: + """Test get_unique_application_names.""" + unique_names = get_unique_application_names() + + assert all(isinstance(s, str) for s in unique_names) + assert all(s for s in unique_names) + assert sorted(unique_names) == [ + "application_1", + "application_2", + "application_4", + "application_5", + "application_5A", + "application_5B", + "application_6", + ] + + +def test_get_application() -> None: + """Test get_application mocking get_resoures.""" + application = get_application("application_1") + if len(application) != 1: + pytest.fail("Unable to get application") + assert application[0].name == "application_1" + + application = get_application("unknown application") + assert len(application) == 0 + + +@pytest.mark.parametrize( + "source, call_count, expected_exception", + ( + ( + "archives/applications/application1.tar.gz", + 0, + pytest.raises( + Exception, match=r"Applications \[application_1\] are already installed" + ), + ), + ( + "various/applications/application_with_empty_config", + 0, + pytest.raises(Exception, match="No application definition found"), + ), + ( + "various/applications/application_with_wrong_config1", + 0, + pytest.raises(Exception, match="Unable to read application definition"), + ), + ( + "various/applications/application_with_wrong_config2", + 0, + pytest.raises(Exception, match="Unable to read application definition"), + ), + ( + "various/applications/application_with_wrong_config3", + 0, + pytest.raises(Exception, match="Unable to read application definition"), + ), + ("various/applications/application_with_valid_config", 1, does_not_raise()), + ( + "archives/applications/application3.tar.gz", + 0, + pytest.raises(Exception, match="Unable to read 
application definition"), + ), + ( + "backends/applications/application1", + 0, + pytest.raises( + Exception, match=r"Applications \[application_1\] are already installed" + ), + ), + ( + "backends/applications/application3", + 0, + pytest.raises(Exception, match="Unable to read application definition"), + ), + ), +) +def test_install_application( + monkeypatch: Any, + test_resources_path: Path, + source: str, + call_count: int, + expected_exception: Any, +) -> None: + """Test application install from archive.""" + mock_create_destination_and_install = MagicMock() + monkeypatch.setattr( + "mlia.backend.executor.application.create_destination_and_install", + mock_create_destination_and_install, + ) + + with expected_exception: + install_application(test_resources_path / source) + assert mock_create_destination_and_install.call_count == call_count + + +def test_remove_application(monkeypatch: Any) -> None: + """Test application removal.""" + mock_remove_backend = MagicMock() + monkeypatch.setattr( + "mlia.backend.executor.application.remove_backend", mock_remove_backend + ) + + remove_application("some_application_directory") + mock_remove_backend.assert_called_once() + + +def test_application_config_without_commands() -> None: + """Test application config without commands.""" + config = ApplicationConfig(name="application") + application = Application(config) + # pylint: disable=use-implicit-booleaness-not-comparison + assert application.commands == {} + + +class TestApplication: + """Test for application class methods.""" + + def test___eq__(self) -> None: + """Test overloaded __eq__ method.""" + config = ApplicationConfig( + # Application + supported_systems=["system1", "system2"], + # inherited from Backend + name="name", + description="description", + commands={}, + ) + application1 = Application(config) + application2 = Application(config) # Identical + assert application1 == application2 + + application3 = Application(config) # changed + # Change one single 
attribute so not equal, but same Type + setattr(application3, "supported_systems", ["somewhere/else"]) + assert application1 != application3 + + # different Type + application4 = "Not the Application you are looking for" + assert application1 != application4 + + application5 = Application(config) + # supported systems could be in any order + setattr(application5, "supported_systems", ["system2", "system1"]) + assert application1 == application5 + + def test_can_run_on(self) -> None: + """Test Application can run on.""" + config = ApplicationConfig(name="application", supported_systems=["System-A"]) + + application = Application(config) + assert application.can_run_on("System-A") + assert not application.can_run_on("System-B") + + applications = get_application("application_1", "System 1") + assert len(applications) == 1 + assert applications[0].can_run_on("System 1") + + def test_unable_to_create_application_without_name(self) -> None: + """Test that it is not possible to create application without name.""" + with pytest.raises(Exception, match="Name is empty"): + Application(ApplicationConfig()) + + def test_application_config_without_commands(self) -> None: + """Test application config without commands.""" + config = ApplicationConfig(name="application") + application = Application(config) + # pylint: disable=use-implicit-booleaness-not-comparison + assert application.commands == {} + + @pytest.mark.parametrize( + "config, expected_params", + ( + ( + ApplicationConfig( + name="application", + commands={"command": ["cmd {user_params:0} {user_params:1}"]}, + user_params={ + "command": [ + UserParamConfig( + name="--param1", description="param1", alias="param1" + ), + UserParamConfig( + name="--param2", description="param2", alias="param2" + ), + ] + }, + ), + [Param("--param1", "param1"), Param("--param2", "param2")], + ), + ( + ApplicationConfig( + name="application", + commands={"command": ["cmd {user_params:param1} {user_params:1}"]}, + user_params={ + 
"command": [ + UserParamConfig( + name="--param1", description="param1", alias="param1" + ), + UserParamConfig( + name="--param2", description="param2", alias="param2" + ), + ] + }, + ), + [Param("--param1", "param1"), Param("--param2", "param2")], + ), + ( + ApplicationConfig( + name="application", + commands={"command": ["cmd {user_params:param1}"]}, + user_params={ + "command": [ + UserParamConfig( + name="--param1", description="param1", alias="param1" + ), + UserParamConfig( + name="--param2", description="param2", alias="param2" + ), + ] + }, + ), + [Param("--param1", "param1")], + ), + ), + ) + def test_remove_unused_params( + self, config: ApplicationConfig, expected_params: list[Param] + ) -> None: + """Test mod remove_unused_parameter.""" + application = Application(config) + application.remove_unused_params() + assert application.commands["command"].params == expected_params + + +@pytest.mark.parametrize( + "config, expected_error", + ( + ( + ExtendedApplicationConfig(name="application"), + pytest.raises(Exception, match="No supported systems definition provided"), + ), + ( + ExtendedApplicationConfig( + name="application", supported_systems=[NamedExecutionConfig(name="")] + ), + pytest.raises( + Exception, + match="Unable to read supported system definition, name is missed", + ), + ), + ( + ExtendedApplicationConfig( + name="application", + supported_systems=[ + NamedExecutionConfig( + name="system", + commands={"command": ["cmd"]}, + user_params={"command": [UserParamConfig(name="param")]}, + ) + ], + commands={"command": ["cmd {user_params:0}"]}, + user_params={"command": [UserParamConfig(name="param")]}, + ), + pytest.raises( + Exception, match="Default parameters for command .* should have aliases" + ), + ), + ( + ExtendedApplicationConfig( + name="application", + supported_systems=[ + NamedExecutionConfig( + name="system", + commands={"command": ["cmd"]}, + user_params={"command": [UserParamConfig(name="param")]}, + ) + ], + commands={"command": 
["cmd {user_params:0}"]}, + user_params={"command": [UserParamConfig(name="param", alias="param")]}, + ), + pytest.raises( + Exception, match="system parameters for command .* should have aliases" + ), + ), + ), +) +def test_load_application_exceptional_cases( + config: ExtendedApplicationConfig, expected_error: Any +) -> None: + """Test exceptional cases for application load function.""" + with expected_error: + load_applications(config) + + +def test_load_application() -> None: + """Test application load function. + + The main purpose of this test is to test configuration for application + for different systems. All configuration should be correctly + overridden if needed. + """ + application_5 = get_application("application_5") + assert len(application_5) == 2 + + default_commands = { + "build": Command(["default build command"]), + "run": Command(["default run command"]), + } + default_variables = {"var1": "value1", "var2": "value2"} + + application_5_0 = application_5[0] + assert application_5_0.supported_systems == ["System 1"] + assert application_5_0.commands == default_commands + assert application_5_0.variables == default_variables + + application_5_1 = application_5[1] + assert application_5_1.supported_systems == ["System 2"] + assert application_5_1.commands == application_5_1.commands + assert application_5_1.variables == default_variables + + application_5a = get_application("application_5A") + assert len(application_5a) == 2 + + application_5a_0 = application_5a[0] + assert application_5a_0.supported_systems == ["System 1"] + assert application_5a_0.commands == default_commands + assert application_5a_0.variables == {"var1": "new value1", "var2": "value2"} + + application_5a_1 = application_5a[1] + assert application_5a_1.supported_systems == ["System 2"] + assert application_5a_1.commands == { + "build": default_commands["build"], + "run": Command(["run command on system 2"]), + } + assert application_5a_1.variables == {"var1": "value1", "var2": 
"new value2"} + + application_5b = get_application("application_5B") + assert len(application_5b) == 2 + + application_5b_0 = application_5b[0] + assert application_5b_0.supported_systems == ["System 1"] + assert application_5b_0.commands == { + "build": Command(["default build command with value for var1 System1"]), + "run": Command(["default run command with value for var2 System1"]), + } + assert "non_used_command" not in application_5b_0.commands + + application_5b_1 = application_5b[1] + assert application_5b_1.supported_systems == ["System 2"] + assert application_5b_1.commands == { + "build": Command(["default build command with value for var1 System2"]), + "run": Command(["run command on system 2"], []), + } diff --git a/tests/test_backend_executor_common.py b/tests/test_backend_executor_common.py new file mode 100644 index 0000000..e881462 --- /dev/null +++ b/tests/test_backend_executor_common.py @@ -0,0 +1,482 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +# pylint: disable=protected-access +"""Tests for the common backend module.""" +from __future__ import annotations + +from contextlib import ExitStack as does_not_raise +from pathlib import Path +from typing import Any +from typing import cast +from typing import IO +from typing import List +from unittest.mock import MagicMock + +import pytest + +from mlia.backend.executor.application import Application +from mlia.backend.executor.common import Backend +from mlia.backend.executor.common import BaseBackendConfig +from mlia.backend.executor.common import Command +from mlia.backend.executor.common import ConfigurationException +from mlia.backend.executor.common import load_config +from mlia.backend.executor.common import Param +from mlia.backend.executor.common import parse_raw_parameter +from mlia.backend.executor.common import remove_backend +from mlia.backend.executor.config import ApplicationConfig +from mlia.backend.executor.config import UserParamConfig +from mlia.backend.executor.execution import ExecutionContext +from mlia.backend.executor.execution import ParamResolver +from mlia.backend.executor.system import System + + +@pytest.mark.parametrize( + "directory_name, expected_exception", + ( + ("some_dir", does_not_raise()), + (None, pytest.raises(Exception, match="No directory name provided")), + ), +) +def test_remove_backend( + monkeypatch: Any, directory_name: str, expected_exception: Any +) -> None: + """Test remove_backend function.""" + mock_remove_resource = MagicMock() + monkeypatch.setattr( + "mlia.backend.executor.common.remove_resource", mock_remove_resource + ) + + with expected_exception: + remove_backend(directory_name, "applications") + + +@pytest.mark.parametrize( + "filename, expected_exception", + ( + ("application_config.json", does_not_raise()), + (None, pytest.raises(Exception, match="Unable to read config")), + ), +) +def test_load_config( + filename: str, expected_exception: Any, 
test_resources_path: Path, monkeypatch: Any +) -> None: + """Test load_config.""" + with expected_exception: + configs: list[Path | IO[bytes] | None] = ( + [None] + if not filename + else [ + # Ignore pylint warning as 'with' can't be used inside of a + # generator expression. + # pylint: disable=consider-using-with + open(test_resources_path / filename, "rb"), + test_resources_path / filename, + ] + ) + for config in configs: + json_mock = MagicMock() + monkeypatch.setattr("mlia.backend.executor.common.json.load", json_mock) + load_config(config) + json_mock.assert_called_once() + + +class TestBackend: + """Test Backend class.""" + + def test___repr__(self) -> None: + """Test the representation of Backend instance.""" + backend = Backend( + BaseBackendConfig(name="Testing name", description="Testing description") + ) + assert str(backend) == "Testing name" + + def test__eq__(self) -> None: + """Test equality method with different cases.""" + backend1 = Backend(BaseBackendConfig(name="name", description="description")) + backend1.commands = {"command": Command(["command"])} + + backend2 = Backend(BaseBackendConfig(name="name", description="description")) + backend2.commands = {"command": Command(["command"])} + + backend3 = Backend( + BaseBackendConfig( + name="Ben", description="This is not the Backend you are looking for" + ) + ) + backend3.commands = {"wave": Command(["wave hand"])} + + backend4 = "Foo" # checking not isinstance(backend4, Backend) + + assert backend1 == backend2 + assert backend1 != backend3 + assert backend1 != backend4 + + @pytest.mark.parametrize( + "parameter, valid", + [ + ("--choice-param value_1", True), + ("--choice-param wrong_value", False), + ("--open-param something", True), + ("--wrong-param value", False), + ], + ) + def test_validate_parameter( + self, parameter: str, valid: bool, test_resources_path: Path + ) -> None: + """Test validate_parameter.""" + config = cast( + List[ApplicationConfig], + load_config(test_resources_path / 
"hello_world.json"), + ) + # The application configuration is a list of configurations so we need + # only the first one + # Exercise the validate_parameter test using the Application classe which + # inherits from Backend. + application = Application(config[0]) + assert application.validate_parameter("run", parameter) == valid + + def test_validate_parameter_with_invalid_command( + self, test_resources_path: Path + ) -> None: + """Test validate_parameter with an invalid command_name.""" + config = cast( + List[ApplicationConfig], + load_config(test_resources_path / "hello_world.json"), + ) + application = Application(config[0]) + with pytest.raises(AttributeError) as err: + # command foo does not exist, so raise an error + application.validate_parameter("foo", "bar") + assert "Unknown command: 'foo'" in str(err.value) + + def test_build_command(self) -> None: + """Test command building.""" + config = { + "name": "test", + "commands": { + "build": ["build {user_params:0} {user_params:1}"], + "run": ["run {user_params:0}"], + "post_run": ["post_run {application_params:0} on {system_params:0}"], + "some_command": ["Command with {variables:var_A}"], + "empty_command": [""], + }, + "user_params": { + "build": [ + { + "name": "choice_param_0=", + "values": [1, 2, 3], + "default_value": 1, + }, + {"name": "choice_param_1", "values": [3, 4, 5], "default_value": 3}, + {"name": "choice_param_3", "values": [6, 7, 8]}, + ], + "run": [{"name": "flag_param_0"}], + }, + "variables": {"var_A": "value for variable A"}, + } + + application, system = Application(config), System(config) # type: ignore + context = ExecutionContext( + app=application, + app_params=[], + system=system, + system_params=[], + ) + + param_resolver = ParamResolver(context) + + cmd = application.build_command( + "build", ["choice_param_0=2", "choice_param_1=4"], param_resolver + ) + assert cmd == ["build choice_param_0=2 choice_param_1 4"] + + cmd = application.build_command("build", ["choice_param_0=2"], 
param_resolver) + assert cmd == ["build choice_param_0=2 choice_param_1 3"] + + cmd = application.build_command( + "build", ["choice_param_0=2", "choice_param_3=7"], param_resolver + ) + assert cmd == ["build choice_param_0=2 choice_param_1 3"] + + with pytest.raises( + ConfigurationException, match="Command 'foo' could not be found." + ): + application.build_command("foo", [""], param_resolver) + + cmd = application.build_command("some_command", [], param_resolver) + assert cmd == ["Command with value for variable A"] + + cmd = application.build_command("empty_command", [], param_resolver) + assert cmd == [""] + + @pytest.mark.parametrize("class_", [Application, System]) + def test_build_command_unknown_variable(self, class_: type) -> None: + """Test that unable to construct backend with unknown variable.""" + with pytest.raises(Exception, match="Unknown variable var1"): + config = {"name": "test", "commands": {"run": ["run {variables:var1}"]}} + class_(config) + + @pytest.mark.parametrize( + "class_, config, expected_output", + [ + ( + Application, + { + "name": "test", + "commands": { + "build": ["build {user_params:0} {user_params:1}"], + "run": ["run {user_params:0}"], + }, + "user_params": { + "build": [ + { + "name": "choice_param_0=", + "values": ["a", "b", "c"], + "default_value": "a", + "alias": "param_1", + }, + { + "name": "choice_param_1", + "values": ["a", "b", "c"], + "default_value": "a", + "alias": "param_2", + }, + {"name": "choice_param_3", "values": ["a", "b", "c"]}, + ], + "run": [{"name": "flag_param_0"}], + }, + }, + [ + ( + "b", + Param( + name="choice_param_0=", + description="", + values=["a", "b", "c"], + default_value="a", + alias="param_1", + ), + ), + ( + "a", + Param( + name="choice_param_1", + description="", + values=["a", "b", "c"], + default_value="a", + alias="param_2", + ), + ), + ( + "c", + Param( + name="choice_param_3", + description="", + values=["a", "b", "c"], + ), + ), + ], + ), + (System, {"name": "test"}, []), + ], + ) 
+ def test_resolved_parameters( + self, + class_: type, + config: dict, + expected_output: list[tuple[str | None, Param]], + ) -> None: + """Test command building.""" + backend = class_(config) + + params = backend.resolved_parameters( + "build", ["choice_param_0=b", "choice_param_3=c"] + ) + assert params == expected_output + + @pytest.mark.parametrize( + ["param_name", "user_param", "expected_value"], + [ + ( + "test_name", + "test_name=1234", + "1234", + ), # optional parameter using '=' + ( + "test_name", + "test_name 1234", + "1234", + ), # optional parameter using ' ' + ("test_name", "test_name", None), # flag + (None, "test_name=1234", "1234"), # positional parameter + ], + ) + def test_resolved_user_parameters( + self, param_name: str, user_param: str, expected_value: str + ) -> None: + """Test different variants to provide user parameters.""" + # A sample config providing one backend config + config = { + "name": "test_backend", + "commands": { + "test": ["user_param:test_param"], + }, + "user_params": { + "test": [UserParamConfig(name=param_name, alias="test_name")], + }, + } + backend = Backend(cast(BaseBackendConfig, config)) + params = backend.resolved_parameters( + command_name="test", user_params=[user_param] + ) + assert len(params) == 1 + value, param = params[0] + assert param_name == param.name + assert expected_value == value + + @pytest.mark.parametrize( + "input_param,expected", + [ + ("--param=1", ("--param", "1")), + ("--param 1", ("--param", "1")), + ("--flag", ("--flag", None)), + ], + ) + def test__parse_raw_parameter( + self, input_param: str, expected: tuple[str, str | None] + ) -> None: + """Test internal method of parsing a single raw parameter.""" + assert parse_raw_parameter(input_param) == expected + + +class TestParam: + """Test Param class.""" + + def test__eq__(self) -> None: + """Test equality method with different cases.""" + param1 = Param(name="test", description="desc", values=["values"]) + param2 = Param(name="test", 
description="desc", values=["values"]) + param3 = Param(name="test1", description="desc", values=["values"]) + param4 = object() + + assert param1 == param2 + assert param1 != param3 + assert param1 != param4 + + def test_get_details(self) -> None: + """Test get_details() method.""" + param1 = Param(name="test", description="desc", values=["values"]) + assert param1.get_details() == { + "name": "test", + "values": ["values"], + "description": "desc", + } + + def test_invalid(self) -> None: + """Test invalid use cases for the Param class.""" + with pytest.raises( + ConfigurationException, + match="Either name, alias or both must be set to identify a parameter.", + ): + Param(name=None, description="desc", values=["values"]) + + +class TestCommand: + """Test Command class.""" + + def test_get_details(self) -> None: + """Test get_details() method.""" + param1 = Param(name="test", description="desc", values=["values"]) + command1 = Command(command_strings=["echo test"], params=[param1]) + assert command1.get_details() == { + "command_strings": ["echo test"], + "user_params": [ + {"name": "test", "values": ["values"], "description": "desc"} + ], + } + + def test__eq__(self) -> None: + """Test equality method with different cases.""" + param1 = Param("test", "desc", ["values"]) + param2 = Param("test1", "desc1", ["values1"]) + command1 = Command(command_strings=["echo test"], params=[param1]) + command2 = Command(command_strings=["echo test"], params=[param1]) + command3 = Command(command_strings=["echo test"]) + command4 = Command(command_strings=["echo test"], params=[param2]) + command5 = object() + + assert command1 == command2 + assert command1 != command3 + assert command1 != command4 + assert command1 != command5 + + @pytest.mark.parametrize( + "params, expected_error", + [ + [[], does_not_raise()], + [[Param("param", "param description", [])], does_not_raise()], + [ + [ + Param("param", "param description", [], None, "alias"), + Param("param", "param 
description", [], None), + ], + does_not_raise(), + ], + [ + [ + Param("param1", "param1 description", [], None, "alias1"), + Param("param2", "param2 description", [], None, "alias2"), + ], + does_not_raise(), + ], + [ + [ + Param("param", "param description", [], None, "alias"), + Param("param", "param description", [], None, "alias"), + ], + pytest.raises(ConfigurationException, match="Non-unique aliases alias"), + ], + [ + [ + Param("alias", "param description", [], None, "alias1"), + Param("param", "param description", [], None, "alias"), + ], + pytest.raises( + ConfigurationException, + match="Aliases .* could not be used as parameter name", + ), + ], + [ + [ + Param("alias", "param description", [], None, "alias"), + Param("param1", "param1 description", [], None, "alias1"), + ], + does_not_raise(), + ], + [ + [ + Param("alias", "param description", [], None, "alias"), + Param("alias", "param1 description", [], None, "alias1"), + ], + pytest.raises( + ConfigurationException, + match="Aliases .* could not be used as parameter name", + ), + ], + [ + [ + Param("param1", "param1 description", [], None, "alias1"), + Param("param2", "param2 description", [], None, "alias1"), + Param("param3", "param3 description", [], None, "alias2"), + Param("param4", "param4 description", [], None, "alias2"), + ], + pytest.raises( + ConfigurationException, match="Non-unique aliases alias1, alias2" + ), + ], + ], + ) + def test_validate_params(self, params: list[Param], expected_error: Any) -> None: + """Test command validation function.""" + with expected_error: + Command([], params) diff --git a/tests/test_backend_executor_execution.py b/tests/test_backend_executor_execution.py new file mode 100644 index 0000000..6a6ea08 --- /dev/null +++ b/tests/test_backend_executor_execution.py @@ -0,0 +1,212 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Test backend execution module.""" +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock + +import pytest + +from mlia.backend.executor.application import Application +from mlia.backend.executor.common import UserParamConfig +from mlia.backend.executor.config import ApplicationConfig +from mlia.backend.executor.config import SystemConfig +from mlia.backend.executor.execution import ExecutionContext +from mlia.backend.executor.execution import get_application_and_system +from mlia.backend.executor.execution import get_application_by_name_and_system +from mlia.backend.executor.execution import ParamResolver +from mlia.backend.executor.execution import run_application +from mlia.backend.executor.system import load_system + + +def test_context_param_resolver(tmpdir: Any) -> None: + """Test parameter resolving.""" + system_config_location = Path(tmpdir) / "system" + system_config_location.mkdir() + + application_config_location = Path(tmpdir) / "application" + application_config_location.mkdir() + + ctx = ExecutionContext( + app=Application( + ApplicationConfig( + name="test_application", + description="Test application", + config_location=application_config_location, + commands={ + "run": [ + "run_command1 {user_params:0}", + "run_command2 {user_params:1}", + ] + }, + variables={"var_1": "value for var_1"}, + user_params={ + "run": [ + UserParamConfig( + name="--param1", + description="Param 1", + default_value="123", + alias="param_1", + ), + UserParamConfig( + name="--param2", description="Param 2", default_value="456" + ), + UserParamConfig( + name="--param3", description="Param 3", alias="param_3" + ), + UserParamConfig( + name="--param4=", + description="Param 4", + default_value="456", + alias="param_4", + ), + UserParamConfig( + description="Param 5", + default_value="789", + alias="param_5", + ), + ] + }, + ) + ), + app_params=["--param2=789"], + system=load_system( + SystemConfig( + 
name="test_system", + description="Test system", + config_location=system_config_location, + commands={ + "build": ["build_command1 {user_params:0}"], + "run": ["run_command {application.commands.run:1}"], + }, + variables={"var_1": "value for var_1"}, + user_params={ + "build": [ + UserParamConfig( + name="--param1", description="Param 1", default_value="aaa" + ), + UserParamConfig(name="--param2", description="Param 2"), + ] + }, + ) + ), + system_params=["--param1=bbb"], + ) + + param_resolver = ParamResolver(ctx) + expected_values = { + "application.name": "test_application", + "application.description": "Test application", + "application.config_dir": str(application_config_location), + "application.commands.run:0": "run_command1 --param1 123", + "application.commands.run.params:0": "123", + "application.commands.run.params:param_1": "123", + "application.commands.run:1": "run_command2 --param2 789", + "application.commands.run.params:1": "789", + "application.variables:var_1": "value for var_1", + "system.name": "test_system", + "system.description": "Test system", + "system.config_dir": str(system_config_location), + "system.commands.build:0": "build_command1 --param1 bbb", + "system.commands.run:0": "run_command run_command2 --param2 789", + "system.commands.build.params:0": "bbb", + "system.variables:var_1": "value for var_1", + } + + for param, value in expected_values.items(): + assert param_resolver(param) == value + + expected_errors = { + "application.variables:var_2": pytest.raises( + Exception, match="Unknown variable var_2" + ), + "application.commands.clean:0": pytest.raises( + Exception, match="Command clean not found" + ), + "application.commands.run:2": pytest.raises( + Exception, match="Invalid index 2 for command run" + ), + "application.commands.run.params:5": pytest.raises( + Exception, match="Invalid parameter index 5 for command run" + ), + "application.commands.run.params:param_2": pytest.raises( + Exception, + match="No value for 
parameter with index or alias param_2 of command run", + ), + "UNKNOWN": pytest.raises( + Exception, match="Unable to resolve parameter UNKNOWN" + ), + "system.commands.build.params:1": pytest.raises( + Exception, + match="No value for parameter with index or alias 1 of command build", + ), + "system.commands.build:A": pytest.raises( + Exception, match="Bad command index A" + ), + "system.variables:var_2": pytest.raises( + Exception, match="Unknown variable var_2" + ), + } + for param, error in expected_errors.items(): + with error: + param_resolver(param) + + resolved_params = ctx.app.resolved_parameters("run", []) + expected_user_params = { + "user_params:0": "--param1 123", + "user_params:param_1": "--param1 123", + "user_params:2": "--param3", + "user_params:param_3": "--param3", + "user_params:3": "--param4=456", + "user_params:param_4": "--param4=456", + "user_params:param_5": "789", + } + for param, expected_value in expected_user_params.items(): + assert param_resolver(param, "run", resolved_params) == expected_value + + with pytest.raises( + Exception, match="Invalid index 5 for user params of command run" + ): + param_resolver("user_params:5", "run", resolved_params) + + with pytest.raises( + Exception, match="No user parameter for command 'run' with alias 'param_2'." 
+ ): + param_resolver("user_params:param_2", "run", resolved_params) + + with pytest.raises(Exception, match="Unable to resolve user params"): + param_resolver("user_params:0", "", resolved_params) + + +def test_get_application_by_name_and_system(monkeypatch: Any) -> None: + """Test exceptional case for get_application_by_name_and_system.""" + monkeypatch.setattr( + "mlia.backend.executor.execution.get_application", + MagicMock(return_value=[MagicMock(), MagicMock()]), + ) + + with pytest.raises( + ValueError, + match="Error during getting application test_application for the " + "system test_system", + ): + get_application_by_name_and_system("test_application", "test_system") + + +def test_get_application_and_system(monkeypatch: Any) -> None: + """Test exceptional case for get_application_and_system.""" + monkeypatch.setattr( + "mlia.backend.executor.execution.get_system", MagicMock(return_value=None) + ) + + with pytest.raises(ValueError, match="System test_system is not found"): + get_application_and_system("test_application", "test_system") + + +def test_run_application() -> None: + """Test function run_application.""" + ctx = run_application("application_4", [], "System 4", []) + + assert isinstance(ctx, ExecutionContext) + assert ctx.stderr is not None and not ctx.stderr.decode() + assert ctx.stdout is not None and ctx.stdout.decode().strip() == "application_4" diff --git a/tests/test_backend_executor_fs.py b/tests/test_backend_executor_fs.py new file mode 100644 index 0000000..298b8db --- /dev/null +++ b/tests/test_backend_executor_fs.py @@ -0,0 +1,138 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Module for testing fs.py.""" +from __future__ import annotations + +from contextlib import ExitStack as does_not_raise +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock + +import pytest + +from mlia.backend.executor.fs import get_backends_path +from mlia.backend.executor.fs import recreate_directory +from mlia.backend.executor.fs import remove_directory +from mlia.backend.executor.fs import remove_resource +from mlia.backend.executor.fs import ResourceType +from mlia.backend.executor.fs import valid_for_filename + + +@pytest.mark.parametrize( + "resource_name,expected_path", + [ + ("systems", does_not_raise()), + ("applications", does_not_raise()), + ("whaaat", pytest.raises(ResourceWarning)), + (None, pytest.raises(ResourceWarning)), + ], +) +def test_get_backends_path(resource_name: ResourceType, expected_path: Any) -> None: + """Test get_resources() with multiple parameters.""" + with expected_path: + resource_path = get_backends_path(resource_name) + assert resource_path.exists() + + +def test_remove_resource_wrong_directory( + monkeypatch: Any, test_applications_path: Path +) -> None: + """Test removing resource with wrong directory.""" + mock_get_resources = MagicMock(return_value=test_applications_path) + monkeypatch.setattr( + "mlia.backend.executor.fs.get_backends_path", mock_get_resources + ) + + mock_shutil_rmtree = MagicMock() + monkeypatch.setattr("mlia.backend.executor.fs.shutil.rmtree", mock_shutil_rmtree) + + with pytest.raises(Exception, match="Resource .* does not exist"): + remove_resource("unknown", "applications") + mock_shutil_rmtree.assert_not_called() + + with pytest.raises(Exception, match="Wrong resource .*"): + remove_resource("readme.txt", "applications") + mock_shutil_rmtree.assert_not_called() + + +def test_remove_resource(monkeypatch: Any, test_applications_path: Path) -> None: + """Test removing resource data.""" + mock_get_resources = 
MagicMock(return_value=test_applications_path) + monkeypatch.setattr( + "mlia.backend.executor.fs.get_backends_path", mock_get_resources + ) + + mock_shutil_rmtree = MagicMock() + monkeypatch.setattr("mlia.backend.executor.fs.shutil.rmtree", mock_shutil_rmtree) + + remove_resource("application1", "applications") + mock_shutil_rmtree.assert_called_once() + + +def test_remove_directory(tmpdir: Any) -> None: + """Test directory removal.""" + tmpdir_path = Path(tmpdir) + tmpfile = tmpdir_path / "temp.txt" + + for item in [None, tmpfile]: + with pytest.raises(Exception, match="No directory path provided"): + remove_directory(item) + + newdir = tmpdir_path / "newdir" + newdir.mkdir() + + assert newdir.is_dir() + remove_directory(newdir) + assert not newdir.exists() + + +def test_recreate_directory(tmpdir: Any) -> None: + """Test directory recreation.""" + with pytest.raises(Exception, match="No directory path provided"): + recreate_directory(None) + + tmpdir_path = Path(tmpdir) + tmpfile = tmpdir_path / "temp.txt" + tmpfile.touch() + with pytest.raises(Exception, match="Path .* does exist and it is not a directory"): + recreate_directory(tmpfile) + + newdir = tmpdir_path / "newdir" + newdir.mkdir() + newfile = newdir / "newfile" + newfile.touch() + assert list(newdir.iterdir()) == [newfile] + recreate_directory(newdir) + assert not list(newdir.iterdir()) + + newdir2 = tmpdir_path / "newdir2" + assert not newdir2.exists() + recreate_directory(newdir2) + assert newdir2.is_dir() + + +def write_to_file( + write_directory: Any, write_mode: str, write_text: str | bytes +) -> Path: + """Write some text to a temporary test file.""" + tmpdir_path = Path(write_directory) + tmpfile = tmpdir_path / "file_name.txt" + with open(tmpfile, write_mode) as file: # pylint: disable=unspecified-encoding + file.write(write_text) + return tmpfile + + +@pytest.mark.parametrize( + "value, replacement, expected_result", + [ + ["", "", ""], + ["123", "", "123"], + ["123", "_", "123"], + 
["/some_folder/some_script.sh", "", "some_foldersome_script.sh"], + ["/some_folder/some_script.sh", "_", "_some_folder_some_script.sh"], + ["!;'some_name$%^!", "_", "___some_name____"], + ], +) +def test_valid_for_filename(value: str, replacement: str, expected_result: str) -> None: + """Test function valid_for_filename.""" + assert valid_for_filename(value, replacement) == expected_result diff --git a/tests/test_backend_executor_output_consumer.py b/tests/test_backend_executor_output_consumer.py new file mode 100644 index 0000000..537084f --- /dev/null +++ b/tests/test_backend_executor_output_consumer.py @@ -0,0 +1,100 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for the output parsing.""" +from __future__ import annotations + +import base64 +import json +from typing import Any + +import pytest + +from mlia.backend.executor.output_consumer import Base64OutputConsumer +from mlia.backend.executor.output_consumer import OutputConsumer + + +OUTPUT_MATCH_ALL = bytearray( + """ +String1: My awesome string! +String2: STRINGS_ARE_GREAT!!! +Int: 12 +Float: 3.14 +""", + encoding="utf-8", +) + +OUTPUT_NO_MATCH = bytearray( + """ +This contains no matches... 
+Test1234567890!"£$%^&*()_+@~{}[]/.,<>?| +""", + encoding="utf-8", +) + +OUTPUT_PARTIAL_MATCH = bytearray( + "String1: My awesome string!", + encoding="utf-8", +) + +REGEX_CONFIG = { + "FirstString": {"pattern": r"String1.*: (.*)", "type": "str"}, + "SecondString": {"pattern": r"String2.*: (.*)!!!", "type": "str"}, + "IntegerValue": {"pattern": r"Int.*: (.*)", "type": "int"}, + "FloatValue": {"pattern": r"Float.*: (.*)", "type": "float"}, +} + +EMPTY_REGEX_CONFIG: dict[str, dict[str, Any]] = {} + +EXPECTED_METRICS_ALL = { + "FirstString": "My awesome string!", + "SecondString": "STRINGS_ARE_GREAT", + "IntegerValue": 12, + "FloatValue": 3.14, +} + +EXPECTED_METRICS_PARTIAL = { + "FirstString": "My awesome string!", +} + + +@pytest.mark.parametrize( + "expected_metrics", + [ + EXPECTED_METRICS_ALL, + EXPECTED_METRICS_PARTIAL, + ], +) +def test_base64_output_consumer(expected_metrics: dict) -> None: + """ + Make sure the Base64OutputConsumer yields valid results. + + I.e. return an empty dict if either the input or the config is empty and + return the parsed metrics otherwise. + """ + parser = Base64OutputConsumer() + assert isinstance(parser, OutputConsumer) + + def create_base64_output(expected_metrics: dict) -> bytearray: + json_str = json.dumps(expected_metrics, indent=4) + json_b64 = base64.b64encode(json_str.encode("utf-8")) + return ( + OUTPUT_MATCH_ALL # Should not be matched by the Base64OutputConsumer + + f"<{Base64OutputConsumer.TAG_NAME}>".encode() + + bytearray(json_b64) + + f"".encode() + + OUTPUT_NO_MATCH # Just to add some difficulty... 
+ ) + + output = create_base64_output(expected_metrics) + + consumed = False + for line in output.splitlines(): + if parser.feed(line.decode("utf-8")): + consumed = True + assert consumed # we should have consumed at least one line + + res = parser.parsed_output + assert len(res) == 1 + assert isinstance(res, list) + for val in res: + assert val == expected_metrics diff --git a/tests/test_backend_executor_proc.py b/tests/test_backend_executor_proc.py new file mode 100644 index 0000000..e8caf8a --- /dev/null +++ b/tests/test_backend_executor_proc.py @@ -0,0 +1,190 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +# pylint: disable=attribute-defined-outside-init,not-callable +"""Pytests for testing mlia/backend/proc.py.""" +from pathlib import Path +from typing import Any +from unittest import mock + +import pytest +from sh import ErrorReturnCode + +from mlia.backend.executor.proc import Command +from mlia.backend.executor.proc import CommandFailedException +from mlia.backend.executor.proc import CommandNotFound +from mlia.backend.executor.proc import parse_command +from mlia.backend.executor.proc import print_command_stdout +from mlia.backend.executor.proc import run_and_wait +from mlia.backend.executor.proc import ShellCommand +from mlia.backend.executor.proc import terminate_command + + +class TestShellCommand: + """Sample class for collecting tests.""" + + def test_run_ls(self, monkeypatch: Any) -> None: + """Test a simple ls command.""" + mock_command = mock.MagicMock() + monkeypatch.setattr(Command, "bake", mock_command) + + mock_get_stdout_stderr_paths = mock.MagicMock() + mock_get_stdout_stderr_paths.return_value = ("/path/std.out", "/path/std.err") + monkeypatch.setattr( + ShellCommand, "get_stdout_stderr_paths", mock_get_stdout_stderr_paths + ) + + shell_command = ShellCommand() + shell_command.run("ls", "-l") + assert mock_command.mock_calls[0] == mock.call(("-l",)) + assert 
mock_command.mock_calls[1] == mock.call()( + _bg=True, + _err="/path/std.err", + _out="/path/std.out", + _tee=True, + _bg_exc=False, + ) + + def test_run_command_not_found(self) -> None: + """Test whe the command doesn't exist.""" + shell_command = ShellCommand() + with pytest.raises(CommandNotFound): + shell_command.run("lsl", "-l") + + def test_get_stdout_stderr_paths(self) -> None: + """Test the method to get files to store stdout and stderr.""" + shell_command = ShellCommand() + out, err = shell_command.get_stdout_stderr_paths("cmd") + assert out.exists() and out.is_file() + assert err.exists() and err.is_file() + assert "cmd" in out.name + assert "cmd" in err.name + + +@mock.patch("builtins.print") +def test_print_command_stdout_alive(mock_print: Any) -> None: + """Test the print command stdout with an alive (running) process.""" + mock_command = mock.MagicMock() + mock_command.is_alive.return_value = True + mock_command.next.side_effect = ["test1", "test2", StopIteration] + + print_command_stdout(mock_command) + + mock_command.assert_has_calls( + [mock.call.is_alive(), mock.call.next(), mock.call.next()] + ) + mock_print.assert_has_calls( + [mock.call("test1", end=""), mock.call("test2", end="")] + ) + + +@mock.patch("builtins.print") +def test_print_command_stdout_not_alive(mock_print: Any) -> None: + """Test the print command stdout with a not alive (exited) process.""" + mock_command = mock.MagicMock() + mock_command.is_alive.return_value = False + mock_command.stdout = "test" + + print_command_stdout(mock_command) + mock_command.assert_has_calls([mock.call.is_alive()]) + mock_print.assert_called_once_with("test") + + +def test_terminate_command_no_process() -> None: + """Test command termination when process does not exist.""" + mock_command = mock.MagicMock() + mock_command.process.signal_group.side_effect = ProcessLookupError() + + terminate_command(mock_command) + mock_command.process.signal_group.assert_called_once() + 
mock_command.is_alive.assert_not_called() + + +def test_terminate_command() -> None: + """Test command termination.""" + mock_command = mock.MagicMock() + mock_command.is_alive.return_value = False + + terminate_command(mock_command) + mock_command.process.signal_group.assert_called_once() + + +def test_terminate_command_case1() -> None: + """Test command termination when it takes time..""" + mock_command = mock.MagicMock() + mock_command.is_alive.side_effect = [True, True, False] + + terminate_command(mock_command, wait_period=0.1) + mock_command.process.signal_group.assert_called_once() + assert mock_command.is_alive.call_count == 3 + + +def test_terminate_command_case2() -> None: + """Test command termination when it takes much time..""" + mock_command = mock.MagicMock() + mock_command.is_alive.side_effect = [True, True, True] + + terminate_command(mock_command, number_of_attempts=3, wait_period=0.1) + assert mock_command.is_alive.call_count == 3 + assert mock_command.process.signal_group.call_count == 2 + + +class TestRunAndWait: + """Test run_and_wait function.""" + + @pytest.fixture(autouse=True) + def setup_method(self, monkeypatch: Any) -> None: + """Init test method.""" + self.execute_command_mock = mock.MagicMock() + monkeypatch.setattr( + "mlia.backend.executor.proc.execute_command", self.execute_command_mock + ) + + self.terminate_command_mock = mock.MagicMock() + monkeypatch.setattr( + "mlia.backend.executor.proc.terminate_command", + self.terminate_command_mock, + ) + + def test_if_execute_command_raises_exception(self) -> None: + """Test if execute_command fails.""" + self.execute_command_mock.side_effect = Exception("Error!") + with pytest.raises(Exception, match="Error!"): + run_and_wait("command", Path.cwd()) + + def test_if_command_finishes_with_error(self) -> None: + """Test if command finishes with error.""" + cmd_mock = mock.MagicMock() + self.execute_command_mock.return_value = cmd_mock + exit_code_mock = mock.PropertyMock( + 
side_effect=ErrorReturnCode("cmd", bytearray(), bytearray()) + ) + type(cmd_mock).exit_code = exit_code_mock + + with pytest.raises(CommandFailedException): + run_and_wait("command", Path.cwd()) + + @pytest.mark.parametrize("terminate_on_error, call_count", ((False, 0), (True, 1))) + def test_if_command_finishes_with_exception( + self, terminate_on_error: bool, call_count: int + ) -> None: + """Test if command finishes with error.""" + cmd_mock = mock.MagicMock() + self.execute_command_mock.return_value = cmd_mock + exit_code_mock = mock.PropertyMock(side_effect=Exception("Error!")) + type(cmd_mock).exit_code = exit_code_mock + + with pytest.raises(Exception, match="Error!"): + run_and_wait("command", Path.cwd(), terminate_on_error=terminate_on_error) + + assert self.terminate_command_mock.call_count == call_count + + +def test_parse_command() -> None: + """Test parse_command function.""" + assert parse_command("1.sh") == ["bash", "1.sh"] + # The following line raises a B604 bandit error. In our case we specify + # what shell to use instead of using the default one. It is a safe use + # we are ignoring this instance. + assert parse_command("1.sh", shell="sh") == ["sh", "1.sh"] # nosec + assert parse_command("command") == ["command"] + assert parse_command("command 123 --param=1") == ["command", "123", "--param=1"] diff --git a/tests/test_backend_executor_runner.py b/tests/test_backend_executor_runner.py new file mode 100644 index 0000000..36c6e5e --- /dev/null +++ b/tests/test_backend_executor_runner.py @@ -0,0 +1,254 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for module backend/manager.""" +from __future__ import annotations + +from pathlib import Path +from unittest.mock import MagicMock +from unittest.mock import PropertyMock + +import pytest + +from mlia.backend.corstone.performance import BackendRunner +from mlia.backend.corstone.performance import ExecutionParams + + +class TestBackendRunner: + """Tests for BackendRunner class.""" + + @staticmethod + def _setup_backends( + monkeypatch: pytest.MonkeyPatch, + available_systems: list[str] | None = None, + available_apps: list[str] | None = None, + ) -> None: + """Set up backend metadata.""" + + def mock_system(system: str) -> MagicMock: + """Mock the System instance.""" + mock = MagicMock() + type(mock).name = PropertyMock(return_value=system) + return mock + + def mock_app(app: str) -> MagicMock: + """Mock the Application instance.""" + mock = MagicMock() + type(mock).name = PropertyMock(return_value=app) + mock.can_run_on.return_value = True + return mock + + system_mocks = [mock_system(name) for name in (available_systems or [])] + monkeypatch.setattr( + "mlia.backend.executor.runner.get_available_systems", + MagicMock(return_value=system_mocks), + ) + + apps_mock = [mock_app(name) for name in (available_apps or [])] + monkeypatch.setattr( + "mlia.backend.executor.runner.get_available_applications", + MagicMock(return_value=apps_mock), + ) + + @pytest.mark.parametrize( + "available_systems, system, installed", + [ + ([], "system1", False), + (["system1", "system2"], "system1", True), + ], + ) + def test_is_system_installed( + self, + available_systems: list, + system: str, + installed: bool, + monkeypatch: pytest.MonkeyPatch, + ) -> None: + """Test method is_system_installed.""" + backend_runner = BackendRunner() + + self._setup_backends(monkeypatch, available_systems) + + assert backend_runner.is_system_installed(system) == installed + + @pytest.mark.parametrize( + "available_systems, systems", + [ + ([], []), + 
(["system1"], ["system1"]), + ], + ) + def test_installed_systems( + self, + available_systems: list[str], + systems: list[str], + monkeypatch: pytest.MonkeyPatch, + ) -> None: + """Test method installed_systems.""" + backend_runner = BackendRunner() + + self._setup_backends(monkeypatch, available_systems) + assert backend_runner.get_installed_systems() == systems + + @staticmethod + def test_install_system(monkeypatch: pytest.MonkeyPatch) -> None: + """Test system installation.""" + install_system_mock = MagicMock() + monkeypatch.setattr( + "mlia.backend.executor.runner.install_system", install_system_mock + ) + + backend_runner = BackendRunner() + backend_runner.install_system(Path("test_system_path")) + + install_system_mock.assert_called_once_with(Path("test_system_path")) + + @pytest.mark.parametrize( + "available_systems, systems, expected_result", + [ + ([], [], False), + (["system1"], [], False), + (["system1"], ["system1"], True), + (["system1", "system2"], ["system1", "system3"], False), + (["system1", "system2"], ["system1", "system2"], True), + ], + ) + def test_systems_installed( + self, + available_systems: list[str], + systems: list[str], + expected_result: bool, + monkeypatch: pytest.MonkeyPatch, + ) -> None: + """Test method systems_installed.""" + self._setup_backends(monkeypatch, available_systems) + + backend_runner = BackendRunner() + + assert backend_runner.systems_installed(systems) is expected_result + + @pytest.mark.parametrize( + "available_apps, applications, expected_result", + [ + ([], [], False), + (["app1"], [], False), + (["app1"], ["app1"], True), + (["app1", "app2"], ["app1", "app3"], False), + (["app1", "app2"], ["app1", "app2"], True), + ], + ) + def test_applications_installed( + self, + available_apps: list[str], + applications: list[str], + expected_result: bool, + monkeypatch: pytest.MonkeyPatch, + ) -> None: + """Test method applications_installed.""" + self._setup_backends(monkeypatch, [], available_apps) + backend_runner = 
BackendRunner() + + assert backend_runner.applications_installed(applications) is expected_result + + @pytest.mark.parametrize( + "available_apps, applications", + [ + ([], []), + ( + ["application1", "application2"], + ["application1", "application2"], + ), + ], + ) + def test_get_installed_applications( + self, + available_apps: list[str], + applications: list[str], + monkeypatch: pytest.MonkeyPatch, + ) -> None: + """Test method get_installed_applications.""" + self._setup_backends(monkeypatch, [], available_apps) + + backend_runner = BackendRunner() + assert applications == backend_runner.get_installed_applications() + + @staticmethod + def test_install_application(monkeypatch: pytest.MonkeyPatch) -> None: + """Test application installation.""" + mock_install_application = MagicMock() + monkeypatch.setattr( + "mlia.backend.executor.runner.install_application", + mock_install_application, + ) + + backend_runner = BackendRunner() + backend_runner.install_application(Path("test_application_path")) + mock_install_application.assert_called_once_with(Path("test_application_path")) + + @pytest.mark.parametrize( + "available_apps, application, installed", + [ + ([], "system1", False), + ( + ["application1", "application2"], + "application1", + True, + ), + ( + [], + "application1", + False, + ), + ], + ) + def test_is_application_installed( + self, + available_apps: list[str], + application: str, + installed: bool, + monkeypatch: pytest.MonkeyPatch, + ) -> None: + """Test method is_application_installed.""" + self._setup_backends(monkeypatch, [], available_apps) + + backend_runner = BackendRunner() + assert installed == backend_runner.is_application_installed( + application, "system1" + ) + + @staticmethod + @pytest.mark.parametrize( + "execution_params, expected_command", + [ + ( + ExecutionParams("application_4", "System 4", [], []), + ["application_4", [], "System 4", []], + ), + ( + ExecutionParams( + "application_6", + "System 6", + ["param1=value2"], + 
["sys-param1=value2"], + ), + [ + "application_6", + ["param1=value2"], + "System 6", + ["sys-param1=value2"], + ], + ), + ], + ) + def test_run_application_local( + monkeypatch: pytest.MonkeyPatch, + execution_params: ExecutionParams, + expected_command: list[str], + ) -> None: + """Test method run_application with local systems.""" + run_app = MagicMock() + monkeypatch.setattr("mlia.backend.executor.runner.run_application", run_app) + + backend_runner = BackendRunner() + backend_runner.run_application(execution_params) + + run_app.assert_called_once_with(*expected_command) diff --git a/tests/test_backend_executor_source.py b/tests/test_backend_executor_source.py new file mode 100644 index 0000000..3aa336e --- /dev/null +++ b/tests/test_backend_executor_source.py @@ -0,0 +1,205 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for the source backend module.""" +from collections import Counter +from contextlib import ExitStack as does_not_raise +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock +from unittest.mock import patch + +import pytest + +from mlia.backend.executor.common import ConfigurationException +from mlia.backend.executor.source import create_destination_and_install +from mlia.backend.executor.source import DirectorySource +from mlia.backend.executor.source import get_source +from mlia.backend.executor.source import TarArchiveSource + + +def test_create_destination_and_install(test_systems_path: Path, tmpdir: Any) -> None: + """Test create_destination_and_install function.""" + system_directory = test_systems_path / "system1" + + dir_source = DirectorySource(system_directory) + resources = Path(tmpdir) + create_destination_and_install(dir_source, resources) + assert (resources / "system1").is_dir() + + +@patch( + "mlia.backend.executor.source.DirectorySource.create_destination", + return_value=False, +) +def 
test_create_destination_and_install_if_dest_creation_not_required( + mock_ds_create_destination: Any, tmpdir: Any +) -> None: + """Test create_destination_and_install function.""" + dir_source = DirectorySource(Path("unknown")) + resources = Path(tmpdir) + with pytest.raises(Exception): + create_destination_and_install(dir_source, resources) + + mock_ds_create_destination.assert_called_once() + + +def test_create_destination_and_install_if_installation_fails(tmpdir: Any) -> None: + """Test create_destination_and_install function if installation fails.""" + dir_source = DirectorySource(Path("unknown")) + resources = Path(tmpdir) + with pytest.raises(Exception, match="Directory .* does not exist"): + create_destination_and_install(dir_source, resources) + assert not (resources / "unknown").exists() + assert resources.exists() + + +def test_create_destination_and_install_if_name_is_empty() -> None: + """Test create_destination_and_install function fails if source name is empty.""" + source = MagicMock() + source.create_destination.return_value = True + source.name.return_value = None + + with pytest.raises(Exception, match="Unable to get source name"): + create_destination_and_install(source, Path("some_path")) + + source.install_into.assert_not_called() + + +@pytest.mark.parametrize( + "source_path, expected_class, expected_error", + [ + ( + Path("backends/applications/application1/"), + DirectorySource, + does_not_raise(), + ), + ( + Path("archives/applications/application1.tar.gz"), + TarArchiveSource, + does_not_raise(), + ), + ( + Path("doesnt/exist"), + None, + pytest.raises( + ConfigurationException, match="Unable to read .*doesnt/exist" + ), + ), + ], +) +def test_get_source( + source_path: Path, + expected_class: Any, + expected_error: Any, + test_resources_path: Path, +) -> None: + """Test get_source function.""" + with expected_error: + full_source_path = test_resources_path / source_path + source = get_source(full_source_path) + assert isinstance(source, 
expected_class) + + +class TestDirectorySource: + """Test DirectorySource class.""" + + @pytest.mark.parametrize( + "directory, name", + [ + (Path("/some/path/some_system"), "some_system"), + (Path("some_system"), "some_system"), + ], + ) + def test_name(self, directory: Path, name: str) -> None: + """Test getting source name.""" + assert DirectorySource(directory).name() == name + + def test_install_into(self, test_systems_path: Path, tmpdir: Any) -> None: + """Test install directory into destination.""" + system_directory = test_systems_path / "system1" + + dir_source = DirectorySource(system_directory) + with pytest.raises(Exception, match="Wrong destination .*"): + dir_source.install_into(Path("unknown_destination")) + + tmpdir_path = Path(tmpdir) + dir_source.install_into(tmpdir_path) + source_files = [f.name for f in system_directory.iterdir()] + dest_files = [f.name for f in tmpdir_path.iterdir()] + assert Counter(source_files) == Counter(dest_files) + + def test_install_into_unknown_source_directory(self, tmpdir: Any) -> None: + """Test install system from unknown directory.""" + with pytest.raises(Exception, match="Directory .* does not exist"): + DirectorySource(Path("unknown_directory")).install_into(Path(tmpdir)) + + +class TestTarArchiveSource: + """Test TarArchiveSource class.""" + + @pytest.mark.parametrize( + "archive, name", + [ + (Path("some_archive.tgz"), "some_archive"), + (Path("some_archive.tar.gz"), "some_archive"), + (Path("some_archive"), "some_archive"), + ("archives/systems/system1.tar.gz", "system1"), + ("archives/systems/system1_dir.tar.gz", "system1"), + ], + ) + def test_name(self, test_resources_path: Path, archive: Path, name: str) -> None: + """Test getting source name.""" + assert TarArchiveSource(test_resources_path / archive).name() == name + + def test_install_into(self, test_resources_path: Path, tmpdir: Any) -> None: + """Test install archive into destination.""" + system_archive = test_resources_path / 
"archives/systems/system1.tar.gz" + + tar_source = TarArchiveSource(system_archive) + with pytest.raises(Exception, match="Wrong destination .*"): + tar_source.install_into(Path("unknown_destination")) + + tmpdir_path = Path(tmpdir) + tar_source.install_into(tmpdir_path) + source_files = [ + "backend-config.json.license", + "backend-config.json", + "system_artifact", + ] + dest_files = [f.name for f in tmpdir_path.iterdir()] + assert Counter(source_files) == Counter(dest_files) + + def test_install_into_unknown_source_archive(self, tmpdir: Any) -> None: + """Test install unknown source archive.""" + with pytest.raises(Exception, match="File .* does not exist"): + TarArchiveSource(Path("unknown.tar.gz")).install_into(Path(tmpdir)) + + def test_install_into_unsupported_source_archive(self, tmpdir: Any) -> None: + """Test install unsupported file type.""" + plain_text_file = Path(tmpdir) / "test_file" + plain_text_file.write_text("Not a system config") + + with pytest.raises(Exception, match="Unsupported archive type .*"): + TarArchiveSource(plain_text_file).install_into(Path(tmpdir)) + + def test_lazy_property_init(self, test_resources_path: Path) -> None: + """Test that class properties initialized correctly.""" + system_archive = test_resources_path / "archives/systems/system1.tar.gz" + + tar_source = TarArchiveSource(system_archive) + assert tar_source.name() == "system1" + assert tar_source.config() is not None + assert tar_source.create_destination() + + tar_source = TarArchiveSource(system_archive) + assert tar_source.config() is not None + assert tar_source.create_destination() + assert tar_source.name() == "system1" + + def test_create_destination_property(self, test_resources_path: Path) -> None: + """Test create_destination property filled correctly for different archives.""" + system_archive1 = test_resources_path / "archives/systems/system1.tar.gz" + system_archive2 = test_resources_path / "archives/systems/system1_dir.tar.gz" + + assert 
TarArchiveSource(system_archive1).create_destination() + assert not TarArchiveSource(system_archive2).create_destination() diff --git a/tests/test_backend_executor_system.py b/tests/test_backend_executor_system.py new file mode 100644 index 0000000..c94ef30 --- /dev/null +++ b/tests/test_backend_executor_system.py @@ -0,0 +1,358 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for system backend.""" +from __future__ import annotations + +from contextlib import ExitStack as does_not_raise +from pathlib import Path +from typing import Any +from unittest.mock import MagicMock + +import pytest + +from mlia.backend.executor.common import Command +from mlia.backend.executor.common import ConfigurationException +from mlia.backend.executor.common import Param +from mlia.backend.executor.common import UserParamConfig +from mlia.backend.executor.config import SystemConfig +from mlia.backend.executor.system import get_available_systems +from mlia.backend.executor.system import get_system +from mlia.backend.executor.system import install_system +from mlia.backend.executor.system import load_system +from mlia.backend.executor.system import remove_system +from mlia.backend.executor.system import System + + +def test_get_available_systems() -> None: + """Test get_available_systems mocking get_resources.""" + available_systems = get_available_systems() + assert all(isinstance(s, System) for s in available_systems) + assert len(available_systems) == 4 + assert [str(s) for s in available_systems] == [ + "System 1", + "System 2", + "System 4", + "System 6", + ] + + +def test_get_system() -> None: + """Test get_system.""" + system1 = get_system("System 1") + assert isinstance(system1, System) + assert system1.name == "System 1" + + system2 = get_system("System 2") + # check that comparison with object of another type returns false + assert system1 != 42 + assert system1 != system2 + + with pytest.raises( 
+ ConfigurationException, match="System 'Unknown system' not found." + ): + get_system("Unknown system") + + +@pytest.mark.parametrize( + "source, call_count, exception_type", + ( + ( + "archives/systems/system1.tar.gz", + 0, + pytest.raises(Exception, match="Systems .* are already installed"), + ), + ( + "archives/systems/system3.tar.gz", + 0, + pytest.raises(Exception, match="Unable to read system definition"), + ), + ( + "backends/systems/system1", + 0, + pytest.raises(Exception, match="Systems .* are already installed"), + ), + ( + "backends/systems/system3", + 0, + pytest.raises(Exception, match="Unable to read system definition"), + ), + ("unknown_path", 0, pytest.raises(Exception, match="Unable to read")), + ( + "various/systems/system_with_empty_config", + 0, + pytest.raises(Exception, match="No system definition found"), + ), + ("various/systems/system_with_valid_config", 1, does_not_raise()), + ), +) +def test_install_system( + monkeypatch: Any, + test_resources_path: Path, + source: str, + call_count: int, + exception_type: Any, +) -> None: + """Test system installation from archive.""" + mock_create_destination_and_install = MagicMock() + monkeypatch.setattr( + "mlia.backend.executor.system.create_destination_and_install", + mock_create_destination_and_install, + ) + + with exception_type: + install_system(test_resources_path / source) + + assert mock_create_destination_and_install.call_count == call_count + + +def test_remove_system(monkeypatch: Any) -> None: + """Test system removal.""" + mock_remove_backend = MagicMock() + monkeypatch.setattr( + "mlia.backend.executor.system.remove_backend", mock_remove_backend + ) + remove_system("some_system_dir") + mock_remove_backend.assert_called_once() + + +def test_system() -> None: + """Test the System class.""" + config = SystemConfig(name="System 1") + system = System(config) + assert str(system) == "System 1" + assert system.name == "System 1" + + +def test_system_with_empty_parameter_name() -> None: + 
"""Test that configuration fails if parameter name is empty.""" + bad_config = SystemConfig( + name="System 1", + commands={"run": ["run"]}, + user_params={"run": [{"name": "", "values": ["1", "2", "3"]}]}, + ) + with pytest.raises(Exception, match="Parameter has an empty 'name' attribute."): + System(bad_config) + + +def test_system_run() -> None: + """Test run operation for system.""" + system = get_system("System 4") + assert isinstance(system, System) + + system.run("echo 'application run'") + + +def test_system_start_no_config_location() -> None: + """Test that system without config location could not start.""" + system = load_system(SystemConfig(name="test")) + + assert isinstance(system, System) + with pytest.raises( + ConfigurationException, match="System has invalid config location: None" + ): + system.run("sleep 100") + + +@pytest.mark.parametrize( + "config, expected_class, expected_error", + [ + ( + SystemConfig(name="test"), + System, + does_not_raise(), + ), + (SystemConfig(), None, pytest.raises(ConfigurationException)), + ], +) +def test_load_system( + config: SystemConfig, expected_class: type, expected_error: Any +) -> None: + """Test load_system function.""" + if not expected_class: + with expected_error: + load_system(config) + else: + system = load_system(config) + assert isinstance(system, expected_class) + + +def test_load_system_populate_shared_params() -> None: + """Test shared parameters population.""" + with pytest.raises(Exception, match="All shared parameters should have aliases"): + load_system( + SystemConfig( + name="test_system", + user_params={ + "shared": [ + UserParamConfig( + name="--shared_param1", + description="Shared parameter", + values=["1", "2", "3"], + default_value="1", + ) + ] + }, + ) + ) + + with pytest.raises( + Exception, match="All parameters for command run should have aliases" + ): + load_system( + SystemConfig( + name="test_system", + user_params={ + "shared": [ + UserParamConfig( + name="--shared_param1", + 
description="Shared parameter", + values=["1", "2", "3"], + default_value="1", + alias="shared_param1", + ) + ], + "run": [ + UserParamConfig( + name="--run_param1", + description="Run specific parameter", + values=["1", "2", "3"], + default_value="2", + ) + ], + }, + ) + ) + system0 = load_system( + SystemConfig( + name="test_system", + commands={"run": ["run_command"]}, + user_params={ + "shared": [], + "run": [ + UserParamConfig( + name="--run_param1", + description="Run specific parameter", + values=["1", "2", "3"], + default_value="2", + alias="run_param1", + ) + ], + }, + ) + ) + assert len(system0.commands) == 1 + run_command1 = system0.commands["run"] + assert run_command1 == Command( + ["run_command"], + [ + Param( + "--run_param1", + "Run specific parameter", + ["1", "2", "3"], + "2", + "run_param1", + ) + ], + ) + + system1 = load_system( + SystemConfig( + name="test_system", + user_params={ + "shared": [ + UserParamConfig( + name="--shared_param1", + description="Shared parameter", + values=["1", "2", "3"], + default_value="1", + alias="shared_param1", + ) + ], + "run": [ + UserParamConfig( + name="--run_param1", + description="Run specific parameter", + values=["1", "2", "3"], + default_value="2", + alias="run_param1", + ) + ], + }, + ) + ) + assert len(system1.commands) == 1 + + run_command1 = system1.commands["run"] + assert run_command1 == Command( + [], + [ + Param( + "--shared_param1", + "Shared parameter", + ["1", "2", "3"], + "1", + "shared_param1", + ), + Param( + "--run_param1", + "Run specific parameter", + ["1", "2", "3"], + "2", + "run_param1", + ), + ], + ) + + system2 = load_system( + SystemConfig( + name="test_system", + commands={"build": ["build_command"]}, + user_params={ + "shared": [ + UserParamConfig( + name="--shared_param1", + description="Shared parameter", + values=["1", "2", "3"], + default_value="1", + alias="shared_param1", + ) + ], + "run": [ + UserParamConfig( + name="--run_param1", + description="Run specific parameter", 
+ values=["1", "2", "3"], + default_value="2", + alias="run_param1", + ) + ], + }, + ) + ) + assert len(system2.commands) == 2 + build_command2 = system2.commands["build"] + assert build_command2 == Command( + ["build_command"], + [], + ) + + run_command2 = system1.commands["run"] + assert run_command2 == Command( + [], + [ + Param( + "--shared_param1", + "Shared parameter", + ["1", "2", "3"], + "1", + "shared_param1", + ), + Param( + "--run_param1", + "Run specific parameter", + ["1", "2", "3"], + "2", + "run_param1", + ), + ], + ) diff --git a/tests/test_backend_fs.py b/tests/test_backend_fs.py deleted file mode 100644 index 292a7cc..0000000 --- a/tests/test_backend_fs.py +++ /dev/null @@ -1,134 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Module for testing fs.py.""" -from __future__ import annotations - -from contextlib import ExitStack as does_not_raise -from pathlib import Path -from typing import Any -from unittest.mock import MagicMock - -import pytest - -from mlia.backend.fs import get_backends_path -from mlia.backend.fs import recreate_directory -from mlia.backend.fs import remove_directory -from mlia.backend.fs import remove_resource -from mlia.backend.fs import ResourceType -from mlia.backend.fs import valid_for_filename - - -@pytest.mark.parametrize( - "resource_name,expected_path", - [ - ("systems", does_not_raise()), - ("applications", does_not_raise()), - ("whaaat", pytest.raises(ResourceWarning)), - (None, pytest.raises(ResourceWarning)), - ], -) -def test_get_backends_path(resource_name: ResourceType, expected_path: Any) -> None: - """Test get_resources() with multiple parameters.""" - with expected_path: - resource_path = get_backends_path(resource_name) - assert resource_path.exists() - - -def test_remove_resource_wrong_directory( - monkeypatch: Any, test_applications_path: Path -) -> None: - """Test removing resource with wrong directory.""" - mock_get_resources 
= MagicMock(return_value=test_applications_path) - monkeypatch.setattr("mlia.backend.fs.get_backends_path", mock_get_resources) - - mock_shutil_rmtree = MagicMock() - monkeypatch.setattr("mlia.backend.fs.shutil.rmtree", mock_shutil_rmtree) - - with pytest.raises(Exception, match="Resource .* does not exist"): - remove_resource("unknown", "applications") - mock_shutil_rmtree.assert_not_called() - - with pytest.raises(Exception, match="Wrong resource .*"): - remove_resource("readme.txt", "applications") - mock_shutil_rmtree.assert_not_called() - - -def test_remove_resource(monkeypatch: Any, test_applications_path: Path) -> None: - """Test removing resource data.""" - mock_get_resources = MagicMock(return_value=test_applications_path) - monkeypatch.setattr("mlia.backend.fs.get_backends_path", mock_get_resources) - - mock_shutil_rmtree = MagicMock() - monkeypatch.setattr("mlia.backend.fs.shutil.rmtree", mock_shutil_rmtree) - - remove_resource("application1", "applications") - mock_shutil_rmtree.assert_called_once() - - -def test_remove_directory(tmpdir: Any) -> None: - """Test directory removal.""" - tmpdir_path = Path(tmpdir) - tmpfile = tmpdir_path / "temp.txt" - - for item in [None, tmpfile]: - with pytest.raises(Exception, match="No directory path provided"): - remove_directory(item) - - newdir = tmpdir_path / "newdir" - newdir.mkdir() - - assert newdir.is_dir() - remove_directory(newdir) - assert not newdir.exists() - - -def test_recreate_directory(tmpdir: Any) -> None: - """Test directory recreation.""" - with pytest.raises(Exception, match="No directory path provided"): - recreate_directory(None) - - tmpdir_path = Path(tmpdir) - tmpfile = tmpdir_path / "temp.txt" - tmpfile.touch() - with pytest.raises(Exception, match="Path .* does exist and it is not a directory"): - recreate_directory(tmpfile) - - newdir = tmpdir_path / "newdir" - newdir.mkdir() - newfile = newdir / "newfile" - newfile.touch() - assert list(newdir.iterdir()) == [newfile] - 
recreate_directory(newdir) - assert not list(newdir.iterdir()) - - newdir2 = tmpdir_path / "newdir2" - assert not newdir2.exists() - recreate_directory(newdir2) - assert newdir2.is_dir() - - -def write_to_file( - write_directory: Any, write_mode: str, write_text: str | bytes -) -> Path: - """Write some text to a temporary test file.""" - tmpdir_path = Path(write_directory) - tmpfile = tmpdir_path / "file_name.txt" - with open(tmpfile, write_mode) as file: # pylint: disable=unspecified-encoding - file.write(write_text) - return tmpfile - - -@pytest.mark.parametrize( - "value, replacement, expected_result", - [ - ["", "", ""], - ["123", "", "123"], - ["123", "_", "123"], - ["/some_folder/some_script.sh", "", "some_foldersome_script.sh"], - ["/some_folder/some_script.sh", "_", "_some_folder_some_script.sh"], - ["!;'some_name$%^!", "_", "___some_name____"], - ], -) -def test_valid_for_filename(value: str, replacement: str, expected_result: str) -> None: - """Test function valid_for_filename.""" - assert valid_for_filename(value, replacement) == expected_result diff --git a/tests/test_backend_install.py b/tests/test_backend_install.py new file mode 100644 index 0000000..024a833 --- /dev/null +++ b/tests/test_backend_install.py @@ -0,0 +1,124 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for common management functionality.""" +from __future__ import annotations + +from pathlib import Path + +import pytest + +from mlia.backend.install import BackendInfo +from mlia.backend.install import get_all_application_names +from mlia.backend.install import get_all_system_names +from mlia.backend.install import get_system_name +from mlia.backend.install import is_supported +from mlia.backend.install import StaticPathChecker +from mlia.backend.install import supported_backends + + +@pytest.mark.parametrize( + "copy_source, system_config", + [ + (True, "system_config.json"), + (True, None), + (False, "system_config.json"), + (False, None), + ], +) +def test_static_path_checker( + tmp_path: Path, copy_source: bool, system_config: str +) -> None: + """Test static path checker.""" + checker = StaticPathChecker(tmp_path, ["file1.txt"], copy_source, system_config) + tmp_path.joinpath("file1.txt").touch() + + result = checker(tmp_path) + assert result == BackendInfo(tmp_path, copy_source, system_config) + + +def test_static_path_checker_invalid_path(tmp_path: Path) -> None: + """Test static path checker with invalid path.""" + checker = StaticPathChecker(tmp_path, ["file1.txt"]) + + result = checker(tmp_path) + assert result is None + + result = checker(tmp_path / "unknown_directory") + assert result is None + + +def test_supported_backends() -> None: + """Test function supported backends.""" + assert supported_backends() == ["Corstone-300", "Corstone-310"] + + +@pytest.mark.parametrize( + "backend, expected_result", + [ + ["unknown_backend", False], + ["Corstone-300", True], + ["Corstone-310", True], + ], +) +def test_is_supported(backend: str, expected_result: bool) -> None: + """Test function is_supported.""" + assert is_supported(backend) == expected_result + + +@pytest.mark.parametrize( + "backend, expected_result", + [ + [ + "Corstone-300", + [ + "Corstone-300: Cortex-M55+Ethos-U55", + "Corstone-300: 
Cortex-M55+Ethos-U65", + ], + ], + [ + "Corstone-310", + [ + "Corstone-310: Cortex-M85+Ethos-U55", + "Corstone-310: Cortex-M85+Ethos-U65", + ], + ], + ], +) +def test_get_all_system_names(backend: str, expected_result: list[str]) -> None: + """Test function get_all_system_names.""" + assert sorted(get_all_system_names(backend)) == expected_result + + +@pytest.mark.parametrize( + "backend, expected_result", + [ + [ + "Corstone-300", + [ + "Generic Inference Runner: Ethos-U55", + "Generic Inference Runner: Ethos-U65", + ], + ], + [ + "Corstone-310", + [ + "Generic Inference Runner: Ethos-U55", + "Generic Inference Runner: Ethos-U65", + ], + ], + ], +) +def test_get_all_application_names(backend: str, expected_result: list[str]) -> None: + """Test function get_all_application_names.""" + assert sorted(get_all_application_names(backend)) == expected_result + + +def test_get_system_name() -> None: + """Test function get_system_name.""" + assert ( + get_system_name("Corstone-300", "ethos-u55") + == "Corstone-300: Cortex-M55+Ethos-U55" + ) + + with pytest.raises(KeyError): + get_system_name("some_backend", "some_type") diff --git a/tests/test_backend_manager.py b/tests/test_backend_manager.py index dfbcdaa..19cb357 100644 --- a/tests/test_backend_manager.py +++ b/tests/test_backend_manager.py @@ -1,758 +1,282 @@ # SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
# SPDX-License-Identifier: Apache-2.0 -"""Tests for module backend/manager.""" +"""Tests for installation manager.""" from __future__ import annotations -import base64 -import json -from contextlib import ExitStack as does_not_raise from pathlib import Path from typing import Any +from unittest.mock import call from unittest.mock import MagicMock from unittest.mock import PropertyMock import pytest -from mlia.backend.application import get_application -from mlia.backend.execution import ExecutionContext -from mlia.backend.manager import BackendRunner -from mlia.backend.manager import DeviceInfo -from mlia.backend.manager import estimate_performance -from mlia.backend.manager import ExecutionParams -from mlia.backend.manager import GenericInferenceOutputParser -from mlia.backend.manager import GenericInferenceRunnerEthosU -from mlia.backend.manager import get_generic_runner -from mlia.backend.manager import get_system_name -from mlia.backend.manager import is_supported -from mlia.backend.manager import ModelInfo -from mlia.backend.manager import PerformanceMetrics -from mlia.backend.manager import supported_backends -from mlia.backend.output_consumer import Base64OutputConsumer -from mlia.backend.system import get_system - - -def _mock_encode_b64(data: dict[str, int]) -> str: - """ - Encode the given data into a mock base64-encoded string of JSON. - - This reproduces the base64 encoding done in the Corstone applications. 
- - JSON example: - - ```json - [{'count': 1, - 'profiling_group': 'Inference', - 'samples': [{'name': 'NPU IDLE', 'value': [612]}, - {'name': 'NPU AXI0_RD_DATA_BEAT_RECEIVED', 'value': [165872]}, - {'name': 'NPU AXI0_WR_DATA_BEAT_WRITTEN', 'value': [88712]}, - {'name': 'NPU AXI1_RD_DATA_BEAT_RECEIVED', 'value': [57540]}, - {'name': 'NPU ACTIVE', 'value': [520489]}, - {'name': 'NPU TOTAL', 'value': [521101]}]}] - ``` - """ - wrapped_data = [ - { - "count": 1, - "profiling_group": "Inference", - "samples": [ - {"name": name, "value": [value]} for name, value in data.items() - ], - } - ] - json_str = json.dumps(wrapped_data) - json_bytes = bytearray(json_str, encoding="utf-8") - json_b64 = base64.b64encode(json_bytes).decode("utf-8") - tag = Base64OutputConsumer.TAG_NAME - return f"<{tag}>{json_b64}" +from mlia.backend.install import DownloadAndInstall +from mlia.backend.install import Installation +from mlia.backend.install import InstallationType +from mlia.backend.install import InstallFromPath +from mlia.backend.manager import DefaultInstallationManager -@pytest.mark.parametrize( - "data, is_ready, result, missed_keys", - [ - ( - [], - False, - {}, - { - "npu_active_cycles", - "npu_axi0_rd_data_beat_received", - "npu_axi0_wr_data_beat_written", - "npu_axi1_rd_data_beat_received", - "npu_idle_cycles", - "npu_total_cycles", - }, - ), - ( - ["sample text"], - False, - {}, - { - "npu_active_cycles", - "npu_axi0_rd_data_beat_received", - "npu_axi0_wr_data_beat_written", - "npu_axi1_rd_data_beat_received", - "npu_idle_cycles", - "npu_total_cycles", - }, - ), - ( - [_mock_encode_b64({"NPU AXI0_RD_DATA_BEAT_RECEIVED": 123})], - False, - {"npu_axi0_rd_data_beat_received": 123}, - { - "npu_active_cycles", - "npu_axi0_wr_data_beat_written", - "npu_axi1_rd_data_beat_received", - "npu_idle_cycles", - "npu_total_cycles", - }, - ), - ( - [ - _mock_encode_b64( - { - "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1, - "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2, - "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3, 
- "NPU ACTIVE": 4, - "NPU IDLE": 5, - "NPU TOTAL": 6, - } - ) - ], - True, - { - "npu_axi0_rd_data_beat_received": 1, - "npu_axi0_wr_data_beat_written": 2, - "npu_axi1_rd_data_beat_received": 3, - "npu_active_cycles": 4, - "npu_idle_cycles": 5, - "npu_total_cycles": 6, - }, - set(), - ), - ], -) -def test_generic_inference_output_parser( - data: dict[str, int], is_ready: bool, result: dict, missed_keys: set[str] -) -> None: - """Test generic runner output parser.""" - parser = GenericInferenceOutputParser() - - for line in data: - parser.feed(line) - - assert parser.is_ready() == is_ready - assert parser.result == result - assert parser.missed_keys() == missed_keys - - -class TestBackendRunner: - """Tests for BackendRunner class.""" - - @staticmethod - def _setup_backends( - monkeypatch: pytest.MonkeyPatch, - available_systems: list[str] | None = None, - available_apps: list[str] | None = None, - ) -> None: - """Set up backend metadata.""" - - def mock_system(system: str) -> MagicMock: - """Mock the System instance.""" - mock = MagicMock() - type(mock).name = PropertyMock(return_value=system) - return mock - - def mock_app(app: str) -> MagicMock: - """Mock the Application instance.""" - mock = MagicMock() - type(mock).name = PropertyMock(return_value=app) - mock.can_run_on.return_value = True - return mock - - system_mocks = [mock_system(name) for name in (available_systems or [])] - monkeypatch.setattr( - "mlia.backend.manager.get_available_systems", - MagicMock(return_value=system_mocks), - ) +def get_default_installation_manager_mock( + name: str, + already_installed: bool = False, +) -> MagicMock: + """Get mock instance for DefaultInstallationManager.""" + mock = MagicMock(spec=DefaultInstallationManager) - apps_mock = [mock_app(name) for name in (available_apps or [])] - monkeypatch.setattr( - "mlia.backend.manager.get_available_applications", - MagicMock(return_value=apps_mock), - ) + props = { + "name": name, + "already_installed": already_installed, + } + 
for prop, value in props.items(): + setattr(type(mock), prop, PropertyMock(return_value=value)) - @pytest.mark.parametrize( - "available_systems, system, installed", - [ - ([], "system1", False), - (["system1", "system2"], "system1", True), - ], - ) - def test_is_system_installed( - self, - available_systems: list, - system: str, - installed: bool, - monkeypatch: pytest.MonkeyPatch, - ) -> None: - """Test method is_system_installed.""" - backend_runner = BackendRunner() - - self._setup_backends(monkeypatch, available_systems) - - assert backend_runner.is_system_installed(system) == installed - - @pytest.mark.parametrize( - "available_systems, systems", - [ - ([], []), - (["system1"], ["system1"]), - ], - ) - def test_installed_systems( - self, - available_systems: list[str], - systems: list[str], - monkeypatch: pytest.MonkeyPatch, - ) -> None: - """Test method installed_systems.""" - backend_runner = BackendRunner() - - self._setup_backends(monkeypatch, available_systems) - assert backend_runner.get_installed_systems() == systems - - @staticmethod - def test_install_system(monkeypatch: pytest.MonkeyPatch) -> None: - """Test system installation.""" - install_system_mock = MagicMock() - monkeypatch.setattr("mlia.backend.manager.install_system", install_system_mock) - - backend_runner = BackendRunner() - backend_runner.install_system(Path("test_system_path")) - - install_system_mock.assert_called_once_with(Path("test_system_path")) - - @pytest.mark.parametrize( - "available_systems, systems, expected_result", - [ - ([], [], False), - (["system1"], [], False), - (["system1"], ["system1"], True), - (["system1", "system2"], ["system1", "system3"], False), - (["system1", "system2"], ["system1", "system2"], True), - ], - ) - def test_systems_installed( - self, - available_systems: list[str], - systems: list[str], - expected_result: bool, - monkeypatch: pytest.MonkeyPatch, - ) -> None: - """Test method systems_installed.""" - self._setup_backends(monkeypatch, 
available_systems) - - backend_runner = BackendRunner() - - assert backend_runner.systems_installed(systems) is expected_result - - @pytest.mark.parametrize( - "available_apps, applications, expected_result", - [ - ([], [], False), - (["app1"], [], False), - (["app1"], ["app1"], True), - (["app1", "app2"], ["app1", "app3"], False), - (["app1", "app2"], ["app1", "app2"], True), - ], - ) - def test_applications_installed( - self, - available_apps: list[str], - applications: list[str], - expected_result: bool, - monkeypatch: pytest.MonkeyPatch, - ) -> None: - """Test method applications_installed.""" - self._setup_backends(monkeypatch, [], available_apps) - backend_runner = BackendRunner() - - assert backend_runner.applications_installed(applications) is expected_result - - @pytest.mark.parametrize( - "available_apps, applications", - [ - ([], []), - ( - ["application1", "application2"], - ["application1", "application2"], - ), - ], - ) - def test_get_installed_applications( - self, - available_apps: list[str], - applications: list[str], - monkeypatch: pytest.MonkeyPatch, - ) -> None: - """Test method get_installed_applications.""" - self._setup_backends(monkeypatch, [], available_apps) - - backend_runner = BackendRunner() - assert applications == backend_runner.get_installed_applications() - - @staticmethod - def test_install_application(monkeypatch: pytest.MonkeyPatch) -> None: - """Test application installation.""" - mock_install_application = MagicMock() - monkeypatch.setattr( - "mlia.backend.manager.install_application", mock_install_application - ) + return mock - backend_runner = BackendRunner() - backend_runner.install_application(Path("test_application_path")) - mock_install_application.assert_called_once_with(Path("test_application_path")) - @pytest.mark.parametrize( - "available_apps, application, installed", - [ - ([], "system1", False), - ( - ["application1", "application2"], - "application1", - True, - ), - ( - [], - "application1", - False, - ), - ], 
+def _ready_for_uninstall_mock() -> MagicMock: + return get_default_installation_manager_mock( + name="already_installed", + already_installed=True, ) - def test_is_application_installed( - self, - available_apps: list[str], - application: str, - installed: bool, - monkeypatch: pytest.MonkeyPatch, - ) -> None: - """Test method is_application_installed.""" - self._setup_backends(monkeypatch, [], available_apps) - - backend_runner = BackendRunner() - assert installed == backend_runner.is_application_installed( - application, "system1" - ) - @staticmethod - @pytest.mark.parametrize( - "execution_params, expected_command", - [ - ( - ExecutionParams("application_4", "System 4", [], []), - ["application_4", [], "System 4", []], - ), - ( - ExecutionParams( - "application_6", - "System 6", - ["param1=value2"], - ["sys-param1=value2"], - ), - [ - "application_6", - ["param1=value2"], - "System 6", - ["sys-param1=value2"], - ], - ), - ], - ) - def test_run_application_local( - monkeypatch: pytest.MonkeyPatch, - execution_params: ExecutionParams, - expected_command: list[str], - ) -> None: - """Test method run_application with local systems.""" - run_app = MagicMock() - monkeypatch.setattr("mlia.backend.manager.run_application", run_app) - backend_runner = BackendRunner() - backend_runner.run_application(execution_params) +def get_installation_mock( + name: str, + already_installed: bool = False, + could_be_installed: bool = False, + supported_install_type: type | tuple | None = None, +) -> MagicMock: + """Get mock instance for the installation.""" + mock = MagicMock(spec=Installation) - run_app.assert_called_once_with(*expected_command) + def supports(install_type: InstallationType) -> bool: + if supported_install_type is None: + return False + return isinstance(install_type, supported_install_type) -@pytest.mark.parametrize( - "device, system, application, backend, expected_error", - [ - ( - DeviceInfo(device_type="ethos-u55", mac=32), - ("Corstone-300: 
Cortex-M55+Ethos-U55", True), - ("Generic Inference Runner: Ethos-U55", True), - "Corstone-300", - does_not_raise(), - ), - ( - DeviceInfo(device_type="ethos-u55", mac=32), - ("Corstone-300: Cortex-M55+Ethos-U55", False), - ("Generic Inference Runner: Ethos-U55", False), - "Corstone-300", - pytest.raises( - Exception, - match=r"System Corstone-300: Cortex-M55\+Ethos-U55 is not installed", - ), - ), - ( - DeviceInfo(device_type="ethos-u55", mac=32), - ("Corstone-300: Cortex-M55+Ethos-U55", True), - ("Generic Inference Runner: Ethos-U55", False), - "Corstone-300", - pytest.raises( - Exception, - match=r"Application Generic Inference Runner: Ethos-U55 " - r"for the system Corstone-300: Cortex-M55\+Ethos-U55 is not installed", - ), - ), - ( - DeviceInfo(device_type="ethos-u55", mac=32), - ("Corstone-310: Cortex-M85+Ethos-U55", True), - ("Generic Inference Runner: Ethos-U55", True), - "Corstone-310", - does_not_raise(), - ), - ( - DeviceInfo(device_type="ethos-u55", mac=32), - ("Corstone-310: Cortex-M85+Ethos-U55", False), - ("Generic Inference Runner: Ethos-U55", False), - "Corstone-310", - pytest.raises( - Exception, - match=r"System Corstone-310: Cortex-M85\+Ethos-U55 is not installed", - ), - ), - ( - DeviceInfo(device_type="ethos-u55", mac=32), - ("Corstone-310: Cortex-M85+Ethos-U55", True), - ("Generic Inference Runner: Ethos-U55", False), - "Corstone-310", - pytest.raises( - Exception, - match=r"Application Generic Inference Runner: Ethos-U55 " - r"for the system Corstone-310: Cortex-M85\+Ethos-U55 is not installed", - ), - ), - ( - DeviceInfo(device_type="ethos-u65", mac=512), - ("Corstone-300: Cortex-M55+Ethos-U65", True), - ("Generic Inference Runner: Ethos-U65", True), - "Corstone-300", - does_not_raise(), - ), - ( - DeviceInfo(device_type="ethos-u65", mac=512), - ("Corstone-300: Cortex-M55+Ethos-U65", False), - ("Generic Inference Runner: Ethos-U65", False), - "Corstone-300", - pytest.raises( - Exception, - match=r"System Corstone-300: Cortex-M55\+Ethos-U65 
is not installed", - ), - ), - ( - DeviceInfo(device_type="ethos-u65", mac=512), - ("Corstone-300: Cortex-M55+Ethos-U65", True), - ("Generic Inference Runner: Ethos-U65", False), - "Corstone-300", - pytest.raises( - Exception, - match=r"Application Generic Inference Runner: Ethos-U65 " - r"for the system Corstone-300: Cortex-M55\+Ethos-U65 is not installed", - ), - ), - ( - DeviceInfo(device_type="ethos-u65", mac=512), - ("Corstone-310: Cortex-M85+Ethos-U65", True), - ("Generic Inference Runner: Ethos-U65", True), - "Corstone-310", - does_not_raise(), - ), - ( - DeviceInfo(device_type="ethos-u65", mac=512), - ("Corstone-310: Cortex-M85+Ethos-U65", False), - ("Generic Inference Runner: Ethos-U65", False), - "Corstone-310", - pytest.raises( - Exception, - match=r"System Corstone-310: Cortex-M85\+Ethos-U65 is not installed", - ), - ), - ( - DeviceInfo(device_type="ethos-u65", mac=512), - ("Corstone-310: Cortex-M85+Ethos-U65", True), - ("Generic Inference Runner: Ethos-U65", False), - "Corstone-310", - pytest.raises( - Exception, - match=r"Application Generic Inference Runner: Ethos-U65 " - r"for the system Corstone-310: Cortex-M85\+Ethos-U65 is not installed", - ), - ), - ( - DeviceInfo( - device_type="unknown_device", # type: ignore - mac=None, # type: ignore - ), - ("some_system", False), - ("some_application", False), - "some backend", - pytest.raises(Exception, match="Unsupported device unknown_device"), - ), - ], -) -def test_estimate_performance( - device: DeviceInfo, - system: tuple[str, bool], - application: tuple[str, bool], - backend: str, - expected_error: Any, - test_tflite_model: Path, - backend_runner: MagicMock, -) -> None: - """Test getting performance estimations.""" - system_name, system_installed = system - application_name, application_installed = application + mock.supports.side_effect = supports - backend_runner.is_system_installed.return_value = system_installed - backend_runner.is_application_installed.return_value = application_installed + 
props = { + "name": name, + "already_installed": already_installed, + "could_be_installed": could_be_installed, + } + for prop, value in props.items(): + setattr(type(mock), prop, PropertyMock(return_value=value)) - mock_context = create_mock_context( - [ - _mock_encode_b64( - { - "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1, - "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2, - "NPU AXI1_RD_DATA_BEAT_RECEIVED": 3, - "NPU ACTIVE": 4, - "NPU IDLE": 5, - "NPU TOTAL": 6, - } - ) - ] - ) + return mock - backend_runner.run_application.return_value = mock_context - with expected_error: - perf_metrics = estimate_performance( - ModelInfo(test_tflite_model), device, backend - ) +def _already_installed_mock() -> MagicMock: + return get_installation_mock( + name="already_installed", + already_installed=True, + supported_install_type=(DownloadAndInstall, InstallFromPath), + ) - assert isinstance(perf_metrics, PerformanceMetrics) - assert perf_metrics == PerformanceMetrics( - npu_axi0_rd_data_beat_received=1, - npu_axi0_wr_data_beat_written=2, - npu_axi1_rd_data_beat_received=3, - npu_active_cycles=4, - npu_idle_cycles=5, - npu_total_cycles=6, - ) - assert backend_runner.is_system_installed.called_once_with(system_name) - assert backend_runner.is_application_installed.called_once_with( - application_name, system_name - ) +def _ready_for_installation_mock() -> MagicMock: + return get_installation_mock( + name="ready_for_installation", + already_installed=False, + could_be_installed=True, + ) -@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310")) -def test_estimate_performance_insufficient_data( - backend_runner: MagicMock, test_tflite_model: Path, backend: str -) -> None: - """Test that performance could not be estimated when not all data presented.""" - backend_runner.is_system_installed.return_value = True - backend_runner.is_application_installed.return_value = True - - no_total_cycles_output = { - "NPU AXI0_RD_DATA_BEAT_RECEIVED": 1, - "NPU AXI0_WR_DATA_BEAT_WRITTEN": 2, - "NPU 
AXI1_RD_DATA_BEAT_RECEIVED": 3, - "NPU ACTIVE": 4, - "NPU IDLE": 5, - } - mock_context = create_mock_context([_mock_encode_b64(no_total_cycles_output)]) +def _could_be_downloaded_and_installed_mock() -> MagicMock: + return get_installation_mock( + name="could_be_downloaded_and_installed", + already_installed=False, + could_be_installed=True, + supported_install_type=DownloadAndInstall, + ) - backend_runner.run_application.return_value = mock_context - with pytest.raises( - Exception, match="Unable to get performance metrics, insufficient data" - ): - device = DeviceInfo(device_type="ethos-u55", mac=32) - estimate_performance(ModelInfo(test_tflite_model), device, backend) +def _could_be_installed_from_mock() -> MagicMock: + return get_installation_mock( + name="could_be_installed_from", + already_installed=False, + could_be_installed=True, + supported_install_type=InstallFromPath, + ) -@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310")) -def test_estimate_performance_invalid_output( - test_tflite_model: Path, backend_runner: MagicMock, backend: str -) -> None: - """Test estimation could not be done if inference produces unexpected output.""" - backend_runner.is_system_installed.return_value = True - backend_runner.is_application_installed.return_value = True - - mock_context = create_mock_context(["Something", "is", "wrong"]) - backend_runner.run_application.return_value = mock_context - - with pytest.raises(Exception, match="Unable to get performance metrics"): - estimate_performance( - ModelInfo(test_tflite_model), - DeviceInfo(device_type="ethos-u55", mac=256), - backend=backend, +def get_installation_manager( + noninteractive: bool, + installations: list[Any], + monkeypatch: pytest.MonkeyPatch, + yes_response: bool = True, +) -> DefaultInstallationManager: + """Get installation manager instance.""" + if not noninteractive: + monkeypatch.setattr( + "mlia.backend.manager.yes", MagicMock(return_value=yes_response) ) + return 
DefaultInstallationManager(installations, noninteractive=noninteractive) -def create_mock_process(stdout: list[str], stderr: list[str]) -> MagicMock: - """Mock underlying process.""" - mock_process = MagicMock() - mock_process.poll.return_value = 0 - type(mock_process).stdout = PropertyMock(return_value=iter(stdout)) - type(mock_process).stderr = PropertyMock(return_value=iter(stderr)) - return mock_process +def test_installation_manager_filtering() -> None: + """Test default installation manager.""" + already_installed = _already_installed_mock() + ready_for_installation = _ready_for_installation_mock() + could_be_downloaded_and_installed = _could_be_downloaded_and_installed_mock() -def create_mock_context(stdout: list[str]) -> ExecutionContext: - """Mock ExecutionContext.""" - ctx = ExecutionContext( - app=get_application("application_1")[0], - app_params=[], - system=get_system("System 1"), - system_params=[], + manager = DefaultInstallationManager( + [ + already_installed, + ready_for_installation, + could_be_downloaded_and_installed, + ] ) - ctx.stdout = bytearray("\n".join(stdout).encode("utf-8")) - return ctx + assert manager.already_installed("already_installed") == [already_installed] + assert manager.ready_for_installation() == [ + ready_for_installation, + could_be_downloaded_and_installed, + ] -@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310")) -def test_get_generic_runner(backend: str) -> None: - """Test function get_generic_runner().""" - device_info = DeviceInfo("ethos-u55", 256) +@pytest.mark.parametrize("noninteractive", [True, False]) +@pytest.mark.parametrize( + "install_mock, eula_agreement, backend_name, force, expected_call", + [ + [ + _could_be_downloaded_and_installed_mock(), + True, + "could_be_downloaded_and_installed", + False, + [call(DownloadAndInstall(eula_agreement=True))], + ], + [ + _could_be_downloaded_and_installed_mock(), + False, + "could_be_downloaded_and_installed", + True, + 
[call(DownloadAndInstall(eula_agreement=False))], + ], + [ + _already_installed_mock(), + False, + "already_installed", + True, + [call(DownloadAndInstall(eula_agreement=False))], + ], + [ + _could_be_downloaded_and_installed_mock(), + False, + "unknown", + True, + [], + ], + ], +) +def test_installation_manager_download_and_install( + install_mock: MagicMock, + noninteractive: bool, + eula_agreement: bool, + backend_name: str, + force: bool, + expected_call: Any, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Test installation process.""" + install_mock.reset_mock() - runner = get_generic_runner(device_info=device_info, backend=backend) - assert isinstance(runner, GenericInferenceRunnerEthosU) + manager = get_installation_manager(noninteractive, [install_mock], monkeypatch) - with pytest.raises(RuntimeError): - get_generic_runner(device_info=device_info, backend="UNKNOWN_BACKEND") + manager.download_and_install( + backend_name, eula_agreement=eula_agreement, force=force + ) + assert install_mock.install.mock_calls == expected_call + if force and install_mock.already_installed: + install_mock.uninstall.assert_called_once() + else: + install_mock.uninstall.assert_not_called() + +@pytest.mark.parametrize("noninteractive", [True, False]) @pytest.mark.parametrize( - ("backend", "device_type"), - ( - ("Corstone-300", "ethos-u55"), - ("Corstone-300", "ethos-u65"), - ("Corstone-310", "ethos-u55"), - ), + "install_mock, backend_name, force, expected_call", + [ + [ + _could_be_installed_from_mock(), + "could_be_installed_from", + False, + [call(InstallFromPath(Path("some_path")))], + ], + [ + _could_be_installed_from_mock(), + "unknown", + False, + [], + ], + [ + _could_be_installed_from_mock(), + "unknown", + True, + [], + ], + [ + _already_installed_mock(), + "already_installed", + False, + [], + ], + [ + _already_installed_mock(), + "already_installed", + True, + [call(InstallFromPath(Path("some_path")))], + ], + ], ) -def test_backend_support(backend: str, 
device_type: str) -> None: - """Test backend & device support.""" - assert is_supported(backend) - assert is_supported(backend, device_type) - - assert get_system_name(backend, device_type) +def test_installation_manager_install_from( + install_mock: MagicMock, + noninteractive: bool, + backend_name: str, + force: bool, + expected_call: Any, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Test installation process.""" + install_mock.reset_mock() - assert backend in supported_backends() + manager = get_installation_manager(noninteractive, [install_mock], monkeypatch) + manager.install_from(Path("some_path"), backend_name, force=force) + assert install_mock.install.mock_calls == expected_call + if force and install_mock.already_installed: + install_mock.uninstall.assert_called_once() + else: + install_mock.uninstall.assert_not_called() -class TestGenericInferenceRunnerEthosU: - """Test for the class GenericInferenceRunnerEthosU.""" - @staticmethod - @pytest.mark.parametrize( - "device, backend, expected_system, expected_app", +@pytest.mark.parametrize("noninteractive", [True, False]) +@pytest.mark.parametrize( + "install_mock, backend_name, expected_call", + [ [ - [ - DeviceInfo("ethos-u55", 256), - "Corstone-300", - "Corstone-300: Cortex-M55+Ethos-U55", - "Generic Inference Runner: Ethos-U55", - ], - [ - DeviceInfo("ethos-u65", 256), - "Corstone-300", - "Corstone-300: Cortex-M55+Ethos-U65", - "Generic Inference Runner: Ethos-U65", - ], - [ - DeviceInfo("ethos-u55", 256), - "Corstone-310", - "Corstone-310: Cortex-M85+Ethos-U55", - "Generic Inference Runner: Ethos-U55", - ], - [ - DeviceInfo("ethos-u65", 256), - "Corstone-310", - "Corstone-310: Cortex-M85+Ethos-U65", - "Generic Inference Runner: Ethos-U65", - ], + _ready_for_uninstall_mock(), + "already_installed", + [call()], ], - ) - def test_artifact_resolver( - device: DeviceInfo, backend: str, expected_system: str, expected_app: str - ) -> None: - """Test artifact resolving based on the provided 
parameters.""" - generic_runner = get_generic_runner(device, backend) - assert isinstance(generic_runner, GenericInferenceRunnerEthosU) - - assert generic_runner.system_name == expected_system - assert generic_runner.app_name == expected_app - - @staticmethod - def test_artifact_resolver_unsupported_backend() -> None: - """Test that it should be not possible to use unsupported backends.""" - with pytest.raises( - RuntimeError, match="Unsupported device ethos-u65 for backend test_backend" - ): - get_generic_runner(DeviceInfo("ethos-u65", 256), "test_backend") - - @staticmethod - @pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310")) - def test_inference_should_fail_if_system_not_installed( - backend_runner: MagicMock, test_tflite_model: Path, backend: str - ) -> None: - """Test that inference should fail if system is not installed.""" - backend_runner.is_system_installed.return_value = False - - generic_runner = get_generic_runner(DeviceInfo("ethos-u55", 256), backend) - with pytest.raises( - Exception, - match=r"System Corstone-3[01]0: Cortex-M[58]5\+Ethos-U55 is not installed", - ): - generic_runner.run(ModelInfo(test_tflite_model), []) - - @staticmethod - @pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310")) - def test_inference_should_fail_is_apps_not_installed( - backend_runner: MagicMock, test_tflite_model: Path, backend: str - ) -> None: - """Test that inference should fail if apps are not installed.""" - backend_runner.is_system_installed.return_value = True - backend_runner.is_application_installed.return_value = False - - generic_runner = get_generic_runner(DeviceInfo("ethos-u55", 256), backend) - with pytest.raises( - Exception, - match="Application Generic Inference Runner: Ethos-U55" - r" for the system Corstone-3[01]0: Cortex-M[58]5\+Ethos-U55 is not " - r"installed", - ): - generic_runner.run(ModelInfo(test_tflite_model), []) - - -@pytest.fixture(name="backend_runner") -def fixture_backend_runner(monkeypatch: 
pytest.MonkeyPatch) -> MagicMock: - """Mock backend runner.""" - backend_runner_mock = MagicMock(spec=BackendRunner) - monkeypatch.setattr( - "mlia.backend.manager.get_backend_runner", - MagicMock(return_value=backend_runner_mock), - ) - return backend_runner_mock + ], +) +def test_installation_manager_uninstall( + install_mock: MagicMock, + noninteractive: bool, + backend_name: str, + expected_call: Any, + monkeypatch: pytest.MonkeyPatch, +) -> None: + """Test uninstallation.""" + install_mock.reset_mock() + + manager = get_installation_manager(noninteractive, [install_mock], monkeypatch) + manager.uninstall(backend_name) + + assert install_mock.uninstall.mock_calls == expected_call diff --git a/tests/test_backend_output_consumer.py b/tests/test_backend_output_consumer.py deleted file mode 100644 index 2a46787..0000000 --- a/tests/test_backend_output_consumer.py +++ /dev/null @@ -1,100 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Tests for the output parsing.""" -from __future__ import annotations - -import base64 -import json -from typing import Any - -import pytest - -from mlia.backend.output_consumer import Base64OutputConsumer -from mlia.backend.output_consumer import OutputConsumer - - -OUTPUT_MATCH_ALL = bytearray( - """ -String1: My awesome string! -String2: STRINGS_ARE_GREAT!!! -Int: 12 -Float: 3.14 -""", - encoding="utf-8", -) - -OUTPUT_NO_MATCH = bytearray( - """ -This contains no matches... 
-Test1234567890!"£$%^&*()_+@~{}[]/.,<>?| -""", - encoding="utf-8", -) - -OUTPUT_PARTIAL_MATCH = bytearray( - "String1: My awesome string!", - encoding="utf-8", -) - -REGEX_CONFIG = { - "FirstString": {"pattern": r"String1.*: (.*)", "type": "str"}, - "SecondString": {"pattern": r"String2.*: (.*)!!!", "type": "str"}, - "IntegerValue": {"pattern": r"Int.*: (.*)", "type": "int"}, - "FloatValue": {"pattern": r"Float.*: (.*)", "type": "float"}, -} - -EMPTY_REGEX_CONFIG: dict[str, dict[str, Any]] = {} - -EXPECTED_METRICS_ALL = { - "FirstString": "My awesome string!", - "SecondString": "STRINGS_ARE_GREAT", - "IntegerValue": 12, - "FloatValue": 3.14, -} - -EXPECTED_METRICS_PARTIAL = { - "FirstString": "My awesome string!", -} - - -@pytest.mark.parametrize( - "expected_metrics", - [ - EXPECTED_METRICS_ALL, - EXPECTED_METRICS_PARTIAL, - ], -) -def test_base64_output_consumer(expected_metrics: dict) -> None: - """ - Make sure the Base64OutputConsumer yields valid results. - - I.e. return an empty dict if either the input or the config is empty and - return the parsed metrics otherwise. - """ - parser = Base64OutputConsumer() - assert isinstance(parser, OutputConsumer) - - def create_base64_output(expected_metrics: dict) -> bytearray: - json_str = json.dumps(expected_metrics, indent=4) - json_b64 = base64.b64encode(json_str.encode("utf-8")) - return ( - OUTPUT_MATCH_ALL # Should not be matched by the Base64OutputConsumer - + f"<{Base64OutputConsumer.TAG_NAME}>".encode() - + bytearray(json_b64) - + f"".encode() - + OUTPUT_NO_MATCH # Just to add some difficulty... 
- ) - - output = create_base64_output(expected_metrics) - - consumed = False - for line in output.splitlines(): - if parser.feed(line.decode("utf-8")): - consumed = True - assert consumed # we should have consumed at least one line - - res = parser.parsed_output - assert len(res) == 1 - assert isinstance(res, list) - for val in res: - assert val == expected_metrics diff --git a/tests/test_backend_proc.py b/tests/test_backend_proc.py deleted file mode 100644 index d2c2cd4..0000000 --- a/tests/test_backend_proc.py +++ /dev/null @@ -1,189 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -# pylint: disable=attribute-defined-outside-init,not-callable -"""Pytests for testing mlia/backend/proc.py.""" -from pathlib import Path -from typing import Any -from unittest import mock - -import pytest -from sh import ErrorReturnCode - -from mlia.backend.proc import Command -from mlia.backend.proc import CommandFailedException -from mlia.backend.proc import CommandNotFound -from mlia.backend.proc import parse_command -from mlia.backend.proc import print_command_stdout -from mlia.backend.proc import run_and_wait -from mlia.backend.proc import ShellCommand -from mlia.backend.proc import terminate_command - - -class TestShellCommand: - """Sample class for collecting tests.""" - - def test_run_ls(self, monkeypatch: Any) -> None: - """Test a simple ls command.""" - mock_command = mock.MagicMock() - monkeypatch.setattr(Command, "bake", mock_command) - - mock_get_stdout_stderr_paths = mock.MagicMock() - mock_get_stdout_stderr_paths.return_value = ("/path/std.out", "/path/std.err") - monkeypatch.setattr( - ShellCommand, "get_stdout_stderr_paths", mock_get_stdout_stderr_paths - ) - - shell_command = ShellCommand() - shell_command.run("ls", "-l") - assert mock_command.mock_calls[0] == mock.call(("-l",)) - assert mock_command.mock_calls[1] == mock.call()( - _bg=True, - _err="/path/std.err", - _out="/path/std.out", - 
_tee=True, - _bg_exc=False, - ) - - def test_run_command_not_found(self) -> None: - """Test whe the command doesn't exist.""" - shell_command = ShellCommand() - with pytest.raises(CommandNotFound): - shell_command.run("lsl", "-l") - - def test_get_stdout_stderr_paths(self) -> None: - """Test the method to get files to store stdout and stderr.""" - shell_command = ShellCommand() - out, err = shell_command.get_stdout_stderr_paths("cmd") - assert out.exists() and out.is_file() - assert err.exists() and err.is_file() - assert "cmd" in out.name - assert "cmd" in err.name - - -@mock.patch("builtins.print") -def test_print_command_stdout_alive(mock_print: Any) -> None: - """Test the print command stdout with an alive (running) process.""" - mock_command = mock.MagicMock() - mock_command.is_alive.return_value = True - mock_command.next.side_effect = ["test1", "test2", StopIteration] - - print_command_stdout(mock_command) - - mock_command.assert_has_calls( - [mock.call.is_alive(), mock.call.next(), mock.call.next()] - ) - mock_print.assert_has_calls( - [mock.call("test1", end=""), mock.call("test2", end="")] - ) - - -@mock.patch("builtins.print") -def test_print_command_stdout_not_alive(mock_print: Any) -> None: - """Test the print command stdout with a not alive (exited) process.""" - mock_command = mock.MagicMock() - mock_command.is_alive.return_value = False - mock_command.stdout = "test" - - print_command_stdout(mock_command) - mock_command.assert_has_calls([mock.call.is_alive()]) - mock_print.assert_called_once_with("test") - - -def test_terminate_command_no_process() -> None: - """Test command termination when process does not exist.""" - mock_command = mock.MagicMock() - mock_command.process.signal_group.side_effect = ProcessLookupError() - - terminate_command(mock_command) - mock_command.process.signal_group.assert_called_once() - mock_command.is_alive.assert_not_called() - - -def test_terminate_command() -> None: - """Test command termination.""" - mock_command = 
mock.MagicMock() - mock_command.is_alive.return_value = False - - terminate_command(mock_command) - mock_command.process.signal_group.assert_called_once() - - -def test_terminate_command_case1() -> None: - """Test command termination when it takes time..""" - mock_command = mock.MagicMock() - mock_command.is_alive.side_effect = [True, True, False] - - terminate_command(mock_command, wait_period=0.1) - mock_command.process.signal_group.assert_called_once() - assert mock_command.is_alive.call_count == 3 - - -def test_terminate_command_case2() -> None: - """Test command termination when it takes much time..""" - mock_command = mock.MagicMock() - mock_command.is_alive.side_effect = [True, True, True] - - terminate_command(mock_command, number_of_attempts=3, wait_period=0.1) - assert mock_command.is_alive.call_count == 3 - assert mock_command.process.signal_group.call_count == 2 - - -class TestRunAndWait: - """Test run_and_wait function.""" - - @pytest.fixture(autouse=True) - def setup_method(self, monkeypatch: Any) -> None: - """Init test method.""" - self.execute_command_mock = mock.MagicMock() - monkeypatch.setattr( - "mlia.backend.proc.execute_command", self.execute_command_mock - ) - - self.terminate_command_mock = mock.MagicMock() - monkeypatch.setattr( - "mlia.backend.proc.terminate_command", self.terminate_command_mock - ) - - def test_if_execute_command_raises_exception(self) -> None: - """Test if execute_command fails.""" - self.execute_command_mock.side_effect = Exception("Error!") - with pytest.raises(Exception, match="Error!"): - run_and_wait("command", Path.cwd()) - - def test_if_command_finishes_with_error(self) -> None: - """Test if command finishes with error.""" - cmd_mock = mock.MagicMock() - self.execute_command_mock.return_value = cmd_mock - exit_code_mock = mock.PropertyMock( - side_effect=ErrorReturnCode("cmd", bytearray(), bytearray()) - ) - type(cmd_mock).exit_code = exit_code_mock - - with pytest.raises(CommandFailedException): - 
run_and_wait("command", Path.cwd()) - - @pytest.mark.parametrize("terminate_on_error, call_count", ((False, 0), (True, 1))) - def test_if_command_finishes_with_exception( - self, terminate_on_error: bool, call_count: int - ) -> None: - """Test if command finishes with error.""" - cmd_mock = mock.MagicMock() - self.execute_command_mock.return_value = cmd_mock - exit_code_mock = mock.PropertyMock(side_effect=Exception("Error!")) - type(cmd_mock).exit_code = exit_code_mock - - with pytest.raises(Exception, match="Error!"): - run_and_wait("command", Path.cwd(), terminate_on_error=terminate_on_error) - - assert self.terminate_command_mock.call_count == call_count - - -def test_parse_command() -> None: - """Test parse_command function.""" - assert parse_command("1.sh") == ["bash", "1.sh"] - # The following line raises a B604 bandit error. In our case we specify - # what shell to use instead of using the default one. It is a safe use - # we are ignoring this instance. - assert parse_command("1.sh", shell="sh") == ["sh", "1.sh"] # nosec - assert parse_command("command") == ["command"] - assert parse_command("command 123 --param=1") == ["command", "123", "--param=1"] diff --git a/tests/test_backend_source.py b/tests/test_backend_source.py deleted file mode 100644 index c6ef26f..0000000 --- a/tests/test_backend_source.py +++ /dev/null @@ -1,202 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for the source backend module.""" -from collections import Counter -from contextlib import ExitStack as does_not_raise -from pathlib import Path -from typing import Any -from unittest.mock import MagicMock -from unittest.mock import patch - -import pytest - -from mlia.backend.common import ConfigurationException -from mlia.backend.source import create_destination_and_install -from mlia.backend.source import DirectorySource -from mlia.backend.source import get_source -from mlia.backend.source import TarArchiveSource - - -def test_create_destination_and_install(test_systems_path: Path, tmpdir: Any) -> None: - """Test create_destination_and_install function.""" - system_directory = test_systems_path / "system1" - - dir_source = DirectorySource(system_directory) - resources = Path(tmpdir) - create_destination_and_install(dir_source, resources) - assert (resources / "system1").is_dir() - - -@patch("mlia.backend.source.DirectorySource.create_destination", return_value=False) -def test_create_destination_and_install_if_dest_creation_not_required( - mock_ds_create_destination: Any, tmpdir: Any -) -> None: - """Test create_destination_and_install function.""" - dir_source = DirectorySource(Path("unknown")) - resources = Path(tmpdir) - with pytest.raises(Exception): - create_destination_and_install(dir_source, resources) - - mock_ds_create_destination.assert_called_once() - - -def test_create_destination_and_install_if_installation_fails(tmpdir: Any) -> None: - """Test create_destination_and_install function if installation fails.""" - dir_source = DirectorySource(Path("unknown")) - resources = Path(tmpdir) - with pytest.raises(Exception, match="Directory .* does not exist"): - create_destination_and_install(dir_source, resources) - assert not (resources / "unknown").exists() - assert resources.exists() - - -def test_create_destination_and_install_if_name_is_empty() -> None: - """Test create_destination_and_install function 
fails if source name is empty.""" - source = MagicMock() - source.create_destination.return_value = True - source.name.return_value = None - - with pytest.raises(Exception, match="Unable to get source name"): - create_destination_and_install(source, Path("some_path")) - - source.install_into.assert_not_called() - - -@pytest.mark.parametrize( - "source_path, expected_class, expected_error", - [ - ( - Path("backends/applications/application1/"), - DirectorySource, - does_not_raise(), - ), - ( - Path("archives/applications/application1.tar.gz"), - TarArchiveSource, - does_not_raise(), - ), - ( - Path("doesnt/exist"), - None, - pytest.raises( - ConfigurationException, match="Unable to read .*doesnt/exist" - ), - ), - ], -) -def test_get_source( - source_path: Path, - expected_class: Any, - expected_error: Any, - test_resources_path: Path, -) -> None: - """Test get_source function.""" - with expected_error: - full_source_path = test_resources_path / source_path - source = get_source(full_source_path) - assert isinstance(source, expected_class) - - -class TestDirectorySource: - """Test DirectorySource class.""" - - @pytest.mark.parametrize( - "directory, name", - [ - (Path("/some/path/some_system"), "some_system"), - (Path("some_system"), "some_system"), - ], - ) - def test_name(self, directory: Path, name: str) -> None: - """Test getting source name.""" - assert DirectorySource(directory).name() == name - - def test_install_into(self, test_systems_path: Path, tmpdir: Any) -> None: - """Test install directory into destination.""" - system_directory = test_systems_path / "system1" - - dir_source = DirectorySource(system_directory) - with pytest.raises(Exception, match="Wrong destination .*"): - dir_source.install_into(Path("unknown_destination")) - - tmpdir_path = Path(tmpdir) - dir_source.install_into(tmpdir_path) - source_files = [f.name for f in system_directory.iterdir()] - dest_files = [f.name for f in tmpdir_path.iterdir()] - assert Counter(source_files) == 
Counter(dest_files) - - def test_install_into_unknown_source_directory(self, tmpdir: Any) -> None: - """Test install system from unknown directory.""" - with pytest.raises(Exception, match="Directory .* does not exist"): - DirectorySource(Path("unknown_directory")).install_into(Path(tmpdir)) - - -class TestTarArchiveSource: - """Test TarArchiveSource class.""" - - @pytest.mark.parametrize( - "archive, name", - [ - (Path("some_archive.tgz"), "some_archive"), - (Path("some_archive.tar.gz"), "some_archive"), - (Path("some_archive"), "some_archive"), - ("archives/systems/system1.tar.gz", "system1"), - ("archives/systems/system1_dir.tar.gz", "system1"), - ], - ) - def test_name(self, test_resources_path: Path, archive: Path, name: str) -> None: - """Test getting source name.""" - assert TarArchiveSource(test_resources_path / archive).name() == name - - def test_install_into(self, test_resources_path: Path, tmpdir: Any) -> None: - """Test install archive into destination.""" - system_archive = test_resources_path / "archives/systems/system1.tar.gz" - - tar_source = TarArchiveSource(system_archive) - with pytest.raises(Exception, match="Wrong destination .*"): - tar_source.install_into(Path("unknown_destination")) - - tmpdir_path = Path(tmpdir) - tar_source.install_into(tmpdir_path) - source_files = [ - "backend-config.json.license", - "backend-config.json", - "system_artifact", - ] - dest_files = [f.name for f in tmpdir_path.iterdir()] - assert Counter(source_files) == Counter(dest_files) - - def test_install_into_unknown_source_archive(self, tmpdir: Any) -> None: - """Test install unknown source archive.""" - with pytest.raises(Exception, match="File .* does not exist"): - TarArchiveSource(Path("unknown.tar.gz")).install_into(Path(tmpdir)) - - def test_install_into_unsupported_source_archive(self, tmpdir: Any) -> None: - """Test install unsupported file type.""" - plain_text_file = Path(tmpdir) / "test_file" - plain_text_file.write_text("Not a system config") - - with 
pytest.raises(Exception, match="Unsupported archive type .*"): - TarArchiveSource(plain_text_file).install_into(Path(tmpdir)) - - def test_lazy_property_init(self, test_resources_path: Path) -> None: - """Test that class properties initialized correctly.""" - system_archive = test_resources_path / "archives/systems/system1.tar.gz" - - tar_source = TarArchiveSource(system_archive) - assert tar_source.name() == "system1" - assert tar_source.config() is not None - assert tar_source.create_destination() - - tar_source = TarArchiveSource(system_archive) - assert tar_source.config() is not None - assert tar_source.create_destination() - assert tar_source.name() == "system1" - - def test_create_destination_property(self, test_resources_path: Path) -> None: - """Test create_destination property filled correctly for different archives.""" - system_archive1 = test_resources_path / "archives/systems/system1.tar.gz" - system_archive2 = test_resources_path / "archives/systems/system1_dir.tar.gz" - - assert TarArchiveSource(system_archive1).create_destination() - assert not TarArchiveSource(system_archive2).create_destination() diff --git a/tests/test_backend_system.py b/tests/test_backend_system.py deleted file mode 100644 index ecc149d..0000000 --- a/tests/test_backend_system.py +++ /dev/null @@ -1,356 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for system backend.""" -from __future__ import annotations - -from contextlib import ExitStack as does_not_raise -from pathlib import Path -from typing import Any -from unittest.mock import MagicMock - -import pytest - -from mlia.backend.common import Command -from mlia.backend.common import ConfigurationException -from mlia.backend.common import Param -from mlia.backend.common import UserParamConfig -from mlia.backend.config import SystemConfig -from mlia.backend.system import get_available_systems -from mlia.backend.system import get_system -from mlia.backend.system import install_system -from mlia.backend.system import load_system -from mlia.backend.system import remove_system -from mlia.backend.system import System - - -def test_get_available_systems() -> None: - """Test get_available_systems mocking get_resources.""" - available_systems = get_available_systems() - assert all(isinstance(s, System) for s in available_systems) - assert len(available_systems) == 4 - assert [str(s) for s in available_systems] == [ - "System 1", - "System 2", - "System 4", - "System 6", - ] - - -def test_get_system() -> None: - """Test get_system.""" - system1 = get_system("System 1") - assert isinstance(system1, System) - assert system1.name == "System 1" - - system2 = get_system("System 2") - # check that comparison with object of another type returns false - assert system1 != 42 - assert system1 != system2 - - with pytest.raises( - ConfigurationException, match="System 'Unknown system' not found." 
- ): - get_system("Unknown system") - - -@pytest.mark.parametrize( - "source, call_count, exception_type", - ( - ( - "archives/systems/system1.tar.gz", - 0, - pytest.raises(Exception, match="Systems .* are already installed"), - ), - ( - "archives/systems/system3.tar.gz", - 0, - pytest.raises(Exception, match="Unable to read system definition"), - ), - ( - "backends/systems/system1", - 0, - pytest.raises(Exception, match="Systems .* are already installed"), - ), - ( - "backends/systems/system3", - 0, - pytest.raises(Exception, match="Unable to read system definition"), - ), - ("unknown_path", 0, pytest.raises(Exception, match="Unable to read")), - ( - "various/systems/system_with_empty_config", - 0, - pytest.raises(Exception, match="No system definition found"), - ), - ("various/systems/system_with_valid_config", 1, does_not_raise()), - ), -) -def test_install_system( - monkeypatch: Any, - test_resources_path: Path, - source: str, - call_count: int, - exception_type: Any, -) -> None: - """Test system installation from archive.""" - mock_create_destination_and_install = MagicMock() - monkeypatch.setattr( - "mlia.backend.system.create_destination_and_install", - mock_create_destination_and_install, - ) - - with exception_type: - install_system(test_resources_path / source) - - assert mock_create_destination_and_install.call_count == call_count - - -def test_remove_system(monkeypatch: Any) -> None: - """Test system removal.""" - mock_remove_backend = MagicMock() - monkeypatch.setattr("mlia.backend.system.remove_backend", mock_remove_backend) - remove_system("some_system_dir") - mock_remove_backend.assert_called_once() - - -def test_system() -> None: - """Test the System class.""" - config = SystemConfig(name="System 1") - system = System(config) - assert str(system) == "System 1" - assert system.name == "System 1" - - -def test_system_with_empty_parameter_name() -> None: - """Test that configuration fails if parameter name is empty.""" - bad_config = SystemConfig( - 
name="System 1", - commands={"run": ["run"]}, - user_params={"run": [{"name": "", "values": ["1", "2", "3"]}]}, - ) - with pytest.raises(Exception, match="Parameter has an empty 'name' attribute."): - System(bad_config) - - -def test_system_run() -> None: - """Test run operation for system.""" - system = get_system("System 4") - assert isinstance(system, System) - - system.run("echo 'application run'") - - -def test_system_start_no_config_location() -> None: - """Test that system without config location could not start.""" - system = load_system(SystemConfig(name="test")) - - assert isinstance(system, System) - with pytest.raises( - ConfigurationException, match="System has invalid config location: None" - ): - system.run("sleep 100") - - -@pytest.mark.parametrize( - "config, expected_class, expected_error", - [ - ( - SystemConfig(name="test"), - System, - does_not_raise(), - ), - (SystemConfig(), None, pytest.raises(ConfigurationException)), - ], -) -def test_load_system( - config: SystemConfig, expected_class: type, expected_error: Any -) -> None: - """Test load_system function.""" - if not expected_class: - with expected_error: - load_system(config) - else: - system = load_system(config) - assert isinstance(system, expected_class) - - -def test_load_system_populate_shared_params() -> None: - """Test shared parameters population.""" - with pytest.raises(Exception, match="All shared parameters should have aliases"): - load_system( - SystemConfig( - name="test_system", - user_params={ - "shared": [ - UserParamConfig( - name="--shared_param1", - description="Shared parameter", - values=["1", "2", "3"], - default_value="1", - ) - ] - }, - ) - ) - - with pytest.raises( - Exception, match="All parameters for command run should have aliases" - ): - load_system( - SystemConfig( - name="test_system", - user_params={ - "shared": [ - UserParamConfig( - name="--shared_param1", - description="Shared parameter", - values=["1", "2", "3"], - default_value="1", - 
alias="shared_param1", - ) - ], - "run": [ - UserParamConfig( - name="--run_param1", - description="Run specific parameter", - values=["1", "2", "3"], - default_value="2", - ) - ], - }, - ) - ) - system0 = load_system( - SystemConfig( - name="test_system", - commands={"run": ["run_command"]}, - user_params={ - "shared": [], - "run": [ - UserParamConfig( - name="--run_param1", - description="Run specific parameter", - values=["1", "2", "3"], - default_value="2", - alias="run_param1", - ) - ], - }, - ) - ) - assert len(system0.commands) == 1 - run_command1 = system0.commands["run"] - assert run_command1 == Command( - ["run_command"], - [ - Param( - "--run_param1", - "Run specific parameter", - ["1", "2", "3"], - "2", - "run_param1", - ) - ], - ) - - system1 = load_system( - SystemConfig( - name="test_system", - user_params={ - "shared": [ - UserParamConfig( - name="--shared_param1", - description="Shared parameter", - values=["1", "2", "3"], - default_value="1", - alias="shared_param1", - ) - ], - "run": [ - UserParamConfig( - name="--run_param1", - description="Run specific parameter", - values=["1", "2", "3"], - default_value="2", - alias="run_param1", - ) - ], - }, - ) - ) - assert len(system1.commands) == 1 - - run_command1 = system1.commands["run"] - assert run_command1 == Command( - [], - [ - Param( - "--shared_param1", - "Shared parameter", - ["1", "2", "3"], - "1", - "shared_param1", - ), - Param( - "--run_param1", - "Run specific parameter", - ["1", "2", "3"], - "2", - "run_param1", - ), - ], - ) - - system2 = load_system( - SystemConfig( - name="test_system", - commands={"build": ["build_command"]}, - user_params={ - "shared": [ - UserParamConfig( - name="--shared_param1", - description="Shared parameter", - values=["1", "2", "3"], - default_value="1", - alias="shared_param1", - ) - ], - "run": [ - UserParamConfig( - name="--run_param1", - description="Run specific parameter", - values=["1", "2", "3"], - default_value="2", - alias="run_param1", - ) - ], - 
}, - ) - ) - assert len(system2.commands) == 2 - build_command2 = system2.commands["build"] - assert build_command2 == Command( - ["build_command"], - [], - ) - - run_command2 = system1.commands["run"] - assert run_command2 == Command( - [], - [ - Param( - "--shared_param1", - "Shared parameter", - ["1", "2", "3"], - "1", - "shared_param1", - ), - Param( - "--run_param1", - "Run specific parameter", - ["1", "2", "3"], - "2", - "run_param1", - ), - ], - ) diff --git a/tests/test_backend_tosa_checker_install.py b/tests/test_backend_tosa_checker_install.py new file mode 100644 index 0000000..0393f0b --- /dev/null +++ b/tests/test_backend_tosa_checker_install.py @@ -0,0 +1,50 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for python package based installations.""" +from pathlib import Path +from unittest.mock import MagicMock + +import pytest + +from mlia.backend.install import DownloadAndInstall +from mlia.backend.install import InstallFromPath +from mlia.backend.install import PyPackageBackendInstallation +from mlia.backend.tosa_checker.install import get_tosa_backend_installation + + +def test_get_tosa_backend_installation( + tmp_path: Path, monkeypatch: pytest.MonkeyPatch +) -> None: + """Test function get_tosa_backend_installation.""" + mock_package_manager = MagicMock() + monkeypatch.setattr( + "mlia.backend.install.get_package_manager", + lambda: mock_package_manager, + ) + + tosa_installation = get_tosa_backend_installation() + + assert isinstance(tosa_installation, PyPackageBackendInstallation) + assert tosa_installation.name == "tosa-checker" + assert ( + tosa_installation.description + == "Tool to check if a ML model is compatible with the TOSA specification" + ) + assert tosa_installation.could_be_installed + assert tosa_installation.supports(DownloadAndInstall()) + assert not tosa_installation.supports(InstallFromPath(tmp_path)) + + 
mock_package_manager.packages_installed.return_value = True + assert tosa_installation.already_installed + mock_package_manager.packages_installed.assert_called_once_with(["tosa-checker"]) + + with pytest.raises(Exception, match=r"Unsupported installation type.*"): + tosa_installation.install(InstallFromPath(tmp_path)) + + mock_package_manager.install.assert_not_called() + + tosa_installation.install(DownloadAndInstall()) + mock_package_manager.install.assert_called_once_with(["mlia[tosa]"]) + + tosa_installation.uninstall() + mock_package_manager.uninstall.assert_called_once_with(["tosa-checker"]) diff --git a/tests/test_backend_vela_compat.py b/tests/test_backend_vela_compat.py new file mode 100644 index 0000000..6f7a41c --- /dev/null +++ b/tests/test_backend_vela_compat.py @@ -0,0 +1,74 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. +# SPDX-License-Identifier: Apache-2.0 +"""Tests for module vela/compat.""" +from pathlib import Path + +import pytest + +from mlia.backend.vela.compat import generate_supported_operators_report +from mlia.backend.vela.compat import NpuSupported +from mlia.backend.vela.compat import Operator +from mlia.backend.vela.compat import Operators +from mlia.backend.vela.compat import supported_operators +from mlia.devices.ethosu.config import EthosUConfiguration +from mlia.utils.filesystem import working_directory + + +@pytest.mark.parametrize( + "model, expected_ops", + [ + ( + "test_model.tflite", + Operators( + ops=[ + Operator( + name="sequential/conv1/Relu;sequential/conv1/BiasAdd;" + "sequential/conv2/Conv2D;sequential/conv1/Conv2D", + op_type="CONV_2D", + run_on_npu=NpuSupported(supported=True, reasons=[]), + ), + Operator( + name="sequential/conv2/Relu;sequential/conv2/BiasAdd;" + "sequential/conv2/Conv2D", + op_type="CONV_2D", + run_on_npu=NpuSupported(supported=True, reasons=[]), + ), + Operator( + name="sequential/max_pooling2d/MaxPool", + op_type="MAX_POOL_2D", + 
run_on_npu=NpuSupported(supported=True, reasons=[]), + ), + Operator( + name="sequential/flatten/Reshape", + op_type="RESHAPE", + run_on_npu=NpuSupported(supported=True, reasons=[]), + ), + Operator( + name="Identity", + op_type="FULLY_CONNECTED", + run_on_npu=NpuSupported(supported=True, reasons=[]), + ), + ] + ), + ) + ], +) +def test_operators(test_models_path: Path, model: str, expected_ops: Operators) -> None: + """Test operators function.""" + device = EthosUConfiguration("ethos-u55-256") + + operators = supported_operators(test_models_path / model, device.compiler_options) + for expected, actual in zip(expected_ops.ops, operators.ops): + # do not compare names as they could be different on each model generation + assert expected.op_type == actual.op_type + assert expected.run_on_npu == actual.run_on_npu + + +def test_generate_supported_operators_report(tmp_path: Path) -> None: + """Test generating supported operators report.""" + with working_directory(tmp_path): + generate_supported_operators_report() + + md_file = tmp_path / "SUPPORTED_OPS.md" + assert md_file.is_file() + assert md_file.stat().st_size > 0 diff --git a/tests/test_backend_vela_compiler.py b/tests/test_backend_vela_compiler.py new file mode 100644 index 0000000..40268ae --- /dev/null +++ b/tests/test_backend_vela_compiler.py @@ -0,0 +1,163 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for module vela/compiler.""" +from pathlib import Path + +from ethosu.vela.compiler_driver import TensorAllocator +from ethosu.vela.scheduler import OptimizationStrategy + +from mlia.backend.vela.compiler import optimize_model +from mlia.backend.vela.compiler import OptimizedModel +from mlia.backend.vela.compiler import VelaCompiler +from mlia.backend.vela.compiler import VelaCompilerOptions +from mlia.devices.ethosu.config import EthosUConfiguration + + +def test_default_vela_compiler() -> None: + """Test default Vela compiler instance.""" + default_compiler_options = VelaCompilerOptions(accelerator_config="ethos-u55-256") + default_compiler = VelaCompiler(default_compiler_options) + + assert default_compiler.config_files is None + assert default_compiler.system_config == "internal-default" + assert default_compiler.memory_mode == "internal-default" + assert default_compiler.accelerator_config == "ethos-u55-256" + assert default_compiler.max_block_dependency == 3 + assert default_compiler.arena_cache_size is None + assert default_compiler.tensor_allocator == TensorAllocator.HillClimb + assert default_compiler.cpu_tensor_alignment == 16 + assert default_compiler.optimization_strategy == OptimizationStrategy.Performance + assert default_compiler.output_dir is None + + assert default_compiler.get_config() == { + "accelerator_config": "ethos-u55-256", + "system_config": "internal-default", + "core_clock": 500000000.0, + "axi0_port": "Sram", + "axi1_port": "OffChipFlash", + "memory_mode": "internal-default", + "const_mem_area": "Axi1", + "arena_mem_area": "Axi0", + "cache_mem_area": "Axi0", + "arena_cache_size": 4294967296, + "permanent_storage_mem_area": "OffChipFlash", + "feature_map_storage_mem_area": "Sram", + "fast_storage_mem_area": "Sram", + "memory_area": { + "Sram": { + "clock_scales": 1.0, + "burst_length": 32, + "read_latency": 32, + "write_latency": 32, + }, + "Dram": { + "clock_scales": 1.0, + 
"burst_length": 1, + "read_latency": 0, + "write_latency": 0, + }, + "OnChipFlash": { + "clock_scales": 1.0, + "burst_length": 1, + "read_latency": 0, + "write_latency": 0, + }, + "OffChipFlash": { + "clock_scales": 0.125, + "burst_length": 128, + "read_latency": 64, + "write_latency": 64, + }, + }, + } + + +def test_vela_compiler_with_parameters(test_resources_path: Path) -> None: + """Test creation of Vela compiler instance with non-default params.""" + vela_ini_path = str(test_resources_path / "vela/sample_vela.ini") + + compiler_options = VelaCompilerOptions( + config_files=vela_ini_path, + system_config="Ethos_U65_High_End", + memory_mode="Shared_Sram", + accelerator_config="ethos-u65-256", + max_block_dependency=1, + arena_cache_size=10, + tensor_allocator="Greedy", + cpu_tensor_alignment=4, + optimization_strategy="Size", + output_dir="output", + ) + compiler = VelaCompiler(compiler_options) + + assert compiler.config_files == vela_ini_path + assert compiler.system_config == "Ethos_U65_High_End" + assert compiler.memory_mode == "Shared_Sram" + assert compiler.accelerator_config == "ethos-u65-256" + assert compiler.max_block_dependency == 1 + assert compiler.arena_cache_size == 10 + assert compiler.tensor_allocator == TensorAllocator.Greedy + assert compiler.cpu_tensor_alignment == 4 + assert compiler.optimization_strategy == OptimizationStrategy.Size + assert compiler.output_dir == "output" + + assert compiler.get_config() == { + "accelerator_config": "ethos-u65-256", + "system_config": "Ethos_U65_High_End", + "core_clock": 1000000000.0, + "axi0_port": "Sram", + "axi1_port": "Dram", + "memory_mode": "Shared_Sram", + "const_mem_area": "Axi1", + "arena_mem_area": "Axi0", + "cache_mem_area": "Axi0", + "arena_cache_size": 10, + "permanent_storage_mem_area": "Dram", + "feature_map_storage_mem_area": "Sram", + "fast_storage_mem_area": "Sram", + "memory_area": { + "Sram": { + "clock_scales": 1.0, + "burst_length": 32, + "read_latency": 32, + "write_latency": 32, + 
}, + "Dram": { + "clock_scales": 0.234375, + "burst_length": 128, + "read_latency": 500, + "write_latency": 250, + }, + "OnChipFlash": { + "clock_scales": 1.0, + "burst_length": 1, + "read_latency": 0, + "write_latency": 0, + }, + "OffChipFlash": { + "clock_scales": 1.0, + "burst_length": 1, + "read_latency": 0, + "write_latency": 0, + }, + }, + } + + +def test_compile_model(test_tflite_model: Path) -> None: + """Test model optimization.""" + compiler = VelaCompiler(EthosUConfiguration("ethos-u55-256").compiler_options) + + optimized_model = compiler.compile_model(test_tflite_model) + assert isinstance(optimized_model, OptimizedModel) + + +def test_optimize_model(tmp_path: Path, test_tflite_model: Path) -> None: + """Test model optimization and saving into file.""" + tmp_file = tmp_path / "temp.tflite" + + device = EthosUConfiguration("ethos-u55-256") + optimize_model(test_tflite_model, device.compiler_options, tmp_file.absolute()) + + assert tmp_file.is_file() + assert tmp_file.stat().st_size > 0 diff --git a/tests/test_backend_vela_performance.py b/tests/test_backend_vela_performance.py new file mode 100644 index 0000000..a1c806c --- /dev/null +++ b/tests/test_backend_vela_performance.py @@ -0,0 +1,64 @@ +# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
+# SPDX-License-Identifier: Apache-2.0 +"""Tests for module vela/performance.""" +from pathlib import Path +from unittest.mock import MagicMock + +import pytest + +from mlia.backend.vela.compiler import optimize_model +from mlia.backend.vela.performance import estimate_performance +from mlia.backend.vela.performance import PerformanceMetrics +from mlia.devices.ethosu.config import EthosUConfiguration + + +def test_estimate_performance(test_tflite_model: Path) -> None: + """Test getting performance estimations.""" + device = EthosUConfiguration("ethos-u55-256") + perf_metrics = estimate_performance(test_tflite_model, device.compiler_options) + + assert isinstance(perf_metrics, PerformanceMetrics) + + +def test_estimate_performance_already_optimized( + tmp_path: Path, test_tflite_model: Path +) -> None: + """Test that performance estimation should fail for already optimized model.""" + device = EthosUConfiguration("ethos-u55-256") + + optimized_model_path = tmp_path / "optimized_model.tflite" + + optimize_model(test_tflite_model, device.compiler_options, optimized_model_path) + + with pytest.raises( + Exception, match="Unable to estimate performance for the given optimized model" + ): + estimate_performance(optimized_model_path, device.compiler_options) + + +def test_read_invalid_model(test_tflite_invalid_model: Path) -> None: + """Test that reading invalid model should fail with exception.""" + with pytest.raises( + Exception, match=f"Unable to read model {test_tflite_invalid_model}" + ): + device = EthosUConfiguration("ethos-u55-256") + estimate_performance(test_tflite_invalid_model, device.compiler_options) + + +def test_compile_invalid_model( + test_tflite_model: Path, monkeypatch: pytest.MonkeyPatch, tmp_path: Path +) -> None: + """Test that if model could not be compiled then correct exception raised.""" + mock_compiler = MagicMock() + mock_compiler.side_effect = Exception("Bad model!") + + monkeypatch.setattr("mlia.backend.vela.compiler.compiler_driver", 
mock_compiler) + + model_path = tmp_path / "optimized_model.tflite" + with pytest.raises( + Exception, match="Model could not be optimized with Vela compiler" + ): + device = EthosUConfiguration("ethos-u55-256") + optimize_model(test_tflite_model, device.compiler_options, model_path) + + assert not model_path.exists() diff --git a/tests/test_cli_commands.py b/tests/test_cli_commands.py index 3a01f78..77e1f88 100644 --- a/tests/test_cli_commands.py +++ b/tests/test_cli_commands.py @@ -10,6 +10,7 @@ from unittest.mock import MagicMock import pytest +from mlia.backend.manager import DefaultInstallationManager from mlia.cli.commands import backend_install from mlia.cli.commands import backend_list from mlia.cli.commands import backend_uninstall @@ -21,7 +22,6 @@ from mlia.devices.ethosu.config import EthosUConfiguration from mlia.devices.ethosu.performance import MemoryUsage from mlia.devices.ethosu.performance import NPUCycles from mlia.devices.ethosu.performance import PerformanceMetrics -from mlia.tools.metadata.common import DefaultInstallationManager def test_operators_expected_parameters(sample_context: ExecutionContext) -> None: diff --git a/tests/test_devices_ethosu_config.py b/tests/test_devices_ethosu_config.py index d4e043f..2fec0d5 100644 --- a/tests/test_devices_ethosu_config.py +++ b/tests/test_devices_ethosu_config.py @@ -9,9 +9,9 @@ from unittest.mock import MagicMock import pytest +from mlia.backend.vela.compiler import VelaCompilerOptions from mlia.devices.ethosu.config import EthosUConfiguration from mlia.devices.ethosu.config import get_target -from mlia.tools.vela_wrapper import VelaCompilerOptions from mlia.utils.filesystem import get_vela_config diff --git a/tests/test_devices_ethosu_data_analysis.py b/tests/test_devices_ethosu_data_analysis.py index 26aae76..8184c70 100644 --- a/tests/test_devices_ethosu_data_analysis.py +++ b/tests/test_devices_ethosu_data_analysis.py @@ -5,6 +5,9 @@ from __future__ import annotations import pytest +from 
mlia.backend.vela.compat import NpuSupported +from mlia.backend.vela.compat import Operator +from mlia.backend.vela.compat import Operators from mlia.core.common import DataItem from mlia.core.data_analysis import Fact from mlia.devices.ethosu.config import EthosUConfiguration @@ -20,9 +23,6 @@ from mlia.devices.ethosu.performance import NPUCycles from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics from mlia.devices.ethosu.performance import PerformanceMetrics from mlia.nn.tensorflow.optimizations.select import OptimizationSettings -from mlia.tools.vela_wrapper import NpuSupported -from mlia.tools.vela_wrapper import Operator -from mlia.tools.vela_wrapper import Operators def test_perf_metrics_diff() -> None: diff --git a/tests/test_devices_ethosu_data_collection.py b/tests/test_devices_ethosu_data_collection.py index a4f37aa..84b9424 100644 --- a/tests/test_devices_ethosu_data_collection.py +++ b/tests/test_devices_ethosu_data_collection.py @@ -6,6 +6,7 @@ from unittest.mock import MagicMock import pytest +from mlia.backend.vela.compat import Operators from mlia.core.context import Context from mlia.core.data_collection import DataCollector from mlia.core.errors import FunctionalityNotSupportedError @@ -18,7 +19,6 @@ from mlia.devices.ethosu.performance import NPUCycles from mlia.devices.ethosu.performance import OptimizationPerformanceMetrics from mlia.devices.ethosu.performance import PerformanceMetrics from mlia.nn.tensorflow.optimizations.select import OptimizationSettings -from mlia.tools.vela_wrapper import Operators @pytest.mark.parametrize( diff --git a/tests/test_devices_ethosu_performance.py b/tests/test_devices_ethosu_performance.py index b3e5298..3ff73d8 100644 --- a/tests/test_devices_ethosu_performance.py +++ b/tests/test_devices_ethosu_performance.py @@ -23,6 +23,6 @@ def test_memory_usage_conversion() -> None: def mock_performance_estimation(monkeypatch: pytest.MonkeyPatch) -> None: """Mock performance estimation.""" 
monkeypatch.setattr( - "mlia.backend.manager.estimate_performance", + "mlia.backend.corstone.performance.estimate_performance", MagicMock(return_value=MagicMock()), ) diff --git a/tests/test_devices_ethosu_reporters.py b/tests/test_devices_ethosu_reporters.py index f04270c..926c4c3 100644 --- a/tests/test_devices_ethosu_reporters.py +++ b/tests/test_devices_ethosu_reporters.py @@ -13,6 +13,9 @@ from typing import Literal import pytest +from mlia.backend.vela.compat import NpuSupported +from mlia.backend.vela.compat import Operator +from mlia.backend.vela.compat import Operators from mlia.core.reporting import get_reporter from mlia.core.reporting import produce_report from mlia.core.reporting import Report @@ -26,9 +29,6 @@ from mlia.devices.ethosu.reporters import ethos_u_formatters from mlia.devices.ethosu.reporters import report_device_details from mlia.devices.ethosu.reporters import report_operators from mlia.devices.ethosu.reporters import report_perf_metrics -from mlia.tools.vela_wrapper import NpuSupported -from mlia.tools.vela_wrapper import Operator -from mlia.tools.vela_wrapper import Operators from mlia.utils.console import remove_ascii_codes diff --git a/tests/test_tools_metadata_common.py b/tests/test_tools_metadata_common.py deleted file mode 100644 index 9811852..0000000 --- a/tests/test_tools_metadata_common.py +++ /dev/null @@ -1,282 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for commmon installation related functions.""" -from __future__ import annotations - -from pathlib import Path -from typing import Any -from unittest.mock import call -from unittest.mock import MagicMock -from unittest.mock import PropertyMock - -import pytest - -from mlia.tools.metadata.common import DefaultInstallationManager -from mlia.tools.metadata.common import DownloadAndInstall -from mlia.tools.metadata.common import Installation -from mlia.tools.metadata.common import InstallationType -from mlia.tools.metadata.common import InstallFromPath - - -def get_default_installation_manager_mock( - name: str, - already_installed: bool = False, -) -> MagicMock: - """Get mock instance for DefaultInstallationManager.""" - mock = MagicMock(spec=DefaultInstallationManager) - - props = { - "name": name, - "already_installed": already_installed, - } - for prop, value in props.items(): - setattr(type(mock), prop, PropertyMock(return_value=value)) - - return mock - - -def _ready_for_uninstall_mock() -> MagicMock: - return get_default_installation_manager_mock( - name="already_installed", - already_installed=True, - ) - - -def get_installation_mock( - name: str, - already_installed: bool = False, - could_be_installed: bool = False, - supported_install_type: type | tuple | None = None, -) -> MagicMock: - """Get mock instance for the installation.""" - mock = MagicMock(spec=Installation) - - def supports(install_type: InstallationType) -> bool: - if supported_install_type is None: - return False - - return isinstance(install_type, supported_install_type) - - mock.supports.side_effect = supports - - props = { - "name": name, - "already_installed": already_installed, - "could_be_installed": could_be_installed, - } - for prop, value in props.items(): - setattr(type(mock), prop, PropertyMock(return_value=value)) - - return mock - - -def _already_installed_mock() -> MagicMock: - return get_installation_mock( - 
name="already_installed", - already_installed=True, - supported_install_type=(DownloadAndInstall, InstallFromPath), - ) - - -def _ready_for_installation_mock() -> MagicMock: - return get_installation_mock( - name="ready_for_installation", - already_installed=False, - could_be_installed=True, - ) - - -def _could_be_downloaded_and_installed_mock() -> MagicMock: - return get_installation_mock( - name="could_be_downloaded_and_installed", - already_installed=False, - could_be_installed=True, - supported_install_type=DownloadAndInstall, - ) - - -def _could_be_installed_from_mock() -> MagicMock: - return get_installation_mock( - name="could_be_installed_from", - already_installed=False, - could_be_installed=True, - supported_install_type=InstallFromPath, - ) - - -def get_installation_manager( - noninteractive: bool, - installations: list[Any], - monkeypatch: pytest.MonkeyPatch, - yes_response: bool = True, -) -> DefaultInstallationManager: - """Get installation manager instance.""" - if not noninteractive: - monkeypatch.setattr( - "mlia.tools.metadata.common.yes", MagicMock(return_value=yes_response) - ) - - return DefaultInstallationManager(installations, noninteractive=noninteractive) - - -def test_installation_manager_filtering() -> None: - """Test default installation manager.""" - already_installed = _already_installed_mock() - ready_for_installation = _ready_for_installation_mock() - could_be_downloaded_and_installed = _could_be_downloaded_and_installed_mock() - - manager = DefaultInstallationManager( - [ - already_installed, - ready_for_installation, - could_be_downloaded_and_installed, - ] - ) - assert manager.already_installed("already_installed") == [already_installed] - assert manager.ready_for_installation() == [ - ready_for_installation, - could_be_downloaded_and_installed, - ] - - -@pytest.mark.parametrize("noninteractive", [True, False]) -@pytest.mark.parametrize( - "install_mock, eula_agreement, backend_name, force, expected_call", - [ - [ - 
_could_be_downloaded_and_installed_mock(), - True, - "could_be_downloaded_and_installed", - False, - [call(DownloadAndInstall(eula_agreement=True))], - ], - [ - _could_be_downloaded_and_installed_mock(), - False, - "could_be_downloaded_and_installed", - True, - [call(DownloadAndInstall(eula_agreement=False))], - ], - [ - _already_installed_mock(), - False, - "already_installed", - True, - [call(DownloadAndInstall(eula_agreement=False))], - ], - [ - _could_be_downloaded_and_installed_mock(), - False, - "unknown", - True, - [], - ], - ], -) -def test_installation_manager_download_and_install( - install_mock: MagicMock, - noninteractive: bool, - eula_agreement: bool, - backend_name: str, - force: bool, - expected_call: Any, - monkeypatch: pytest.MonkeyPatch, -) -> None: - """Test installation process.""" - install_mock.reset_mock() - - manager = get_installation_manager(noninteractive, [install_mock], monkeypatch) - - manager.download_and_install( - backend_name, eula_agreement=eula_agreement, force=force - ) - - assert install_mock.install.mock_calls == expected_call - if force and install_mock.already_installed: - install_mock.uninstall.assert_called_once() - else: - install_mock.uninstall.assert_not_called() - - -@pytest.mark.parametrize("noninteractive", [True, False]) -@pytest.mark.parametrize( - "install_mock, backend_name, force, expected_call", - [ - [ - _could_be_installed_from_mock(), - "could_be_installed_from", - False, - [call(InstallFromPath(Path("some_path")))], - ], - [ - _could_be_installed_from_mock(), - "unknown", - False, - [], - ], - [ - _could_be_installed_from_mock(), - "unknown", - True, - [], - ], - [ - _already_installed_mock(), - "already_installed", - False, - [], - ], - [ - _already_installed_mock(), - "already_installed", - True, - [call(InstallFromPath(Path("some_path")))], - ], - ], -) -def test_installation_manager_install_from( - install_mock: MagicMock, - noninteractive: bool, - backend_name: str, - force: bool, - expected_call: Any, 
- monkeypatch: pytest.MonkeyPatch, -) -> None: - """Test installation process.""" - install_mock.reset_mock() - - manager = get_installation_manager(noninteractive, [install_mock], monkeypatch) - manager.install_from(Path("some_path"), backend_name, force=force) - - assert install_mock.install.mock_calls == expected_call - if force and install_mock.already_installed: - install_mock.uninstall.assert_called_once() - else: - install_mock.uninstall.assert_not_called() - - -@pytest.mark.parametrize("noninteractive", [True, False]) -@pytest.mark.parametrize( - "install_mock, backend_name, expected_call", - [ - [ - _ready_for_uninstall_mock(), - "already_installed", - [call()], - ], - ], -) -def test_installation_manager_uninstall( - install_mock: MagicMock, - noninteractive: bool, - backend_name: str, - expected_call: Any, - monkeypatch: pytest.MonkeyPatch, -) -> None: - """Test uninstallation.""" - install_mock.reset_mock() - - manager = get_installation_manager(noninteractive, [install_mock], monkeypatch) - manager.uninstall(backend_name) - - assert install_mock.uninstall.mock_calls == expected_call diff --git a/tests/test_tools_metadata_corstone.py b/tests/test_tools_metadata_corstone.py deleted file mode 100644 index a7d81f2..0000000 --- a/tests/test_tools_metadata_corstone.py +++ /dev/null @@ -1,488 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for Corstone related installation functions..""" -from __future__ import annotations - -import tarfile -from pathlib import Path -from typing import Iterable -from unittest.mock import MagicMock - -import pytest - -from mlia.backend.manager import BackendRunner -from mlia.tools.metadata.common import DownloadAndInstall -from mlia.tools.metadata.common import InstallFromPath -from mlia.tools.metadata.corstone import BackendInfo -from mlia.tools.metadata.corstone import BackendInstallation -from mlia.tools.metadata.corstone import BackendInstaller -from mlia.tools.metadata.corstone import BackendMetadata -from mlia.tools.metadata.corstone import CompoundPathChecker -from mlia.tools.metadata.corstone import Corstone300Installer -from mlia.tools.metadata.corstone import get_corstone_300_installation -from mlia.tools.metadata.corstone import get_corstone_310_installation -from mlia.tools.metadata.corstone import get_corstone_installations -from mlia.tools.metadata.corstone import PackagePathChecker -from mlia.tools.metadata.corstone import PathChecker -from mlia.tools.metadata.corstone import StaticPathChecker - - -@pytest.fixture(name="test_mlia_resources") -def fixture_test_mlia_resources( - tmp_path: Path, monkeypatch: pytest.MonkeyPatch -) -> Path: - """Redirect MLIA resources resolution to the temp directory.""" - mlia_resources = tmp_path / "resources" - mlia_resources.mkdir() - - monkeypatch.setattr( - "mlia.tools.metadata.corstone.get_mlia_resources", - MagicMock(return_value=mlia_resources), - ) - - return mlia_resources - - -def get_backend_installation( # pylint: disable=too-many-arguments - backend_runner_mock: MagicMock = MagicMock(), - name: str = "test_name", - description: str = "test_description", - download_artifact: MagicMock | None = None, - path_checker: PathChecker = MagicMock(), - apps_resources: list[str] | None = None, - system_config: str | None = None, - backend_installer: BackendInstaller = 
MagicMock(), - supported_platforms: list[str] | None = None, -) -> BackendInstallation: - """Get backend installation.""" - return BackendInstallation( - backend_runner=backend_runner_mock, - metadata=BackendMetadata( - name=name, - description=description, - system_config=system_config or "", - apps_resources=apps_resources or [], - fvp_dir_name="sample_dir", - download_artifact=download_artifact, - supported_platforms=supported_platforms, - ), - path_checker=path_checker, - backend_installer=backend_installer, - ) - - -@pytest.mark.parametrize( - "platform, supported_platforms, expected_result", - [ - ["Linux", ["Linux"], True], - ["Linux", [], True], - ["Linux", None, True], - ["Windows", ["Linux"], False], - ], -) -def test_could_be_installed_depends_on_platform( - platform: str, - supported_platforms: list[str] | None, - expected_result: bool, - monkeypatch: pytest.MonkeyPatch, -) -> None: - """Test that installation could not be installed on unsupported platform.""" - monkeypatch.setattr( - "mlia.tools.metadata.corstone.platform.system", MagicMock(return_value=platform) - ) - monkeypatch.setattr( - "mlia.tools.metadata.corstone.all_paths_valid", MagicMock(return_value=True) - ) - backend_runner_mock = MagicMock(spec=BackendRunner) - - installation = get_backend_installation( - backend_runner_mock, - supported_platforms=supported_platforms, - ) - assert installation.could_be_installed == expected_result - - -def test_get_corstone_installations() -> None: - """Test function get_corstone_installation.""" - installs = get_corstone_installations() - assert len(installs) == 2 - assert all(isinstance(install, BackendInstallation) for install in installs) - - -def test_backend_installation_metadata_resolving() -> None: - """Test backend installation metadata resolving.""" - backend_runner_mock = MagicMock(spec=BackendRunner) - installation = get_backend_installation(backend_runner_mock) - - assert installation.name == "test_name" - assert installation.description == 
"test_description" - - backend_runner_mock.all_installed.return_value = False - assert installation.already_installed is False - - assert installation.could_be_installed is True - - -def test_backend_installation_supported_install_types(tmp_path: Path) -> None: - """Test supported installation types.""" - installation_no_download_artifact = get_backend_installation() - assert installation_no_download_artifact.supports(DownloadAndInstall()) is False - - installation_with_download_artifact = get_backend_installation( - download_artifact=MagicMock() - ) - assert installation_with_download_artifact.supports(DownloadAndInstall()) is True - - path_checker_mock = MagicMock(return_value=BackendInfo(tmp_path)) - installation_can_install_from_dir = get_backend_installation( - path_checker=path_checker_mock - ) - assert installation_can_install_from_dir.supports(InstallFromPath(tmp_path)) is True - - any_installation = get_backend_installation() - assert any_installation.supports("unknown_install_type") is False # type: ignore - - -def test_backend_installation_install_wrong_type() -> None: - """Test that operation should fail if wrong install type provided.""" - with pytest.raises(Exception, match="Unable to install wrong_install_type"): - backend_runner_mock = MagicMock(spec=BackendRunner) - installation = get_backend_installation(backend_runner_mock) - - installation.install("wrong_install_type") # type: ignore - - -def test_backend_installation_install_from_path( - tmp_path: Path, test_mlia_resources: Path -) -> None: - """Test installation from the path.""" - system_config = test_mlia_resources / "example_config.json" - system_config.touch() - - sample_app = test_mlia_resources / "sample_app" - sample_app.mkdir() - - dist_dir = tmp_path / "dist" - dist_dir.mkdir() - - path_checker_mock = MagicMock(return_value=BackendInfo(dist_dir)) - - backend_runner_mock = MagicMock(spec=BackendRunner) - installation = get_backend_installation( - 
backend_runner_mock=backend_runner_mock, - path_checker=path_checker_mock, - apps_resources=[sample_app.name], - system_config="example_config.json", - ) - - assert installation.supports(InstallFromPath(dist_dir)) is True - installation.install(InstallFromPath(dist_dir)) - - backend_runner_mock.install_system.assert_called_once() - backend_runner_mock.install_application.assert_called_once_with(sample_app) - - -@pytest.mark.parametrize("copy_source", [True, False]) -def test_backend_installation_install_from_static_path( - tmp_path: Path, test_mlia_resources: Path, copy_source: bool -) -> None: - """Test installation from the predefined path.""" - system_config = test_mlia_resources / "example_config.json" - system_config.touch() - - custom_system_config = test_mlia_resources / "custom_config.json" - custom_system_config.touch() - - sample_app = test_mlia_resources / "sample_app" - sample_app.mkdir() - - predefined_location = tmp_path / "backend" - predefined_location.mkdir() - - predefined_location_file = predefined_location / "file.txt" - predefined_location_file.touch() - - predefined_location_dir = predefined_location / "folder" - predefined_location_dir.mkdir() - nested_file = predefined_location_dir / "nested_file.txt" - nested_file.touch() - - backend_runner_mock = MagicMock(spec=BackendRunner) - - def check_install_dir(install_dir: Path) -> None: - """Check content of the install dir.""" - assert install_dir.is_dir() - files = list(install_dir.iterdir()) - - if copy_source: - assert len(files) == 3 - assert all(install_dir / item in files for item in ["file.txt", "folder"]) - assert (install_dir / "folder/nested_file.txt").is_file() - else: - assert len(files) == 1 - - assert install_dir / "custom_config.json" in files - - backend_runner_mock.install_system.side_effect = check_install_dir - - installation = get_backend_installation( - backend_runner_mock=backend_runner_mock, - path_checker=StaticPathChecker( - predefined_location, - ["file.txt"], - 
copy_source=copy_source, - system_config=str(custom_system_config), - ), - apps_resources=[sample_app.name], - system_config="example_config.json", - ) - - assert installation.supports(InstallFromPath(predefined_location)) is True - installation.install(InstallFromPath(predefined_location)) - - backend_runner_mock.install_system.assert_called_once() - backend_runner_mock.install_application.assert_called_once_with(sample_app) - - -def create_sample_fvp_archive(tmp_path: Path) -> Path: - """Create sample FVP tar archive.""" - fvp_archive_dir = tmp_path / "archive" - fvp_archive_dir.mkdir() - - sample_file = fvp_archive_dir / "sample.txt" - sample_file.write_text("Sample file") - - sample_dir = fvp_archive_dir / "sample_dir" - sample_dir.mkdir() - - fvp_archive = tmp_path / "archive.tgz" - with tarfile.open(fvp_archive, "w:gz") as fvp_archive_tar: - fvp_archive_tar.add(fvp_archive_dir, arcname=fvp_archive_dir.name) - - return fvp_archive - - -def test_backend_installation_download_and_install( - test_mlia_resources: Path, tmp_path: Path -) -> None: - """Test downloading and installation process.""" - fvp_archive = create_sample_fvp_archive(tmp_path) - - system_config = test_mlia_resources / "example_config.json" - system_config.touch() - - download_artifact_mock = MagicMock() - download_artifact_mock.download_to.return_value = fvp_archive - - path_checker = PackagePathChecker(["archive/sample.txt"], "archive/sample_dir") - - def installer(_eula_agreement: bool, dist_dir: Path) -> Path: - """Sample installer.""" - return dist_dir - - backend_runner_mock = MagicMock(spec=BackendRunner) - installation = get_backend_installation( - backend_runner_mock, - download_artifact=download_artifact_mock, - backend_installer=installer, - path_checker=path_checker, - system_config="example_config.json", - ) - - installation.install(DownloadAndInstall()) - - backend_runner_mock.install_system.assert_called_once() - - -@pytest.mark.parametrize( - "dir_content, expected_result", - [ - 
[ - ["models/", "file1.txt", "file2.txt"], - "models", - ], - [ - ["file1.txt", "file2.txt"], - None, - ], - [ - ["models/", "file2.txt"], - None, - ], - ], -) -def test_corstone_path_checker_valid_path( - tmp_path: Path, dir_content: list[str], expected_result: str | None -) -> None: - """Test Corstone path checker valid scenario.""" - path_checker = PackagePathChecker(["file1.txt", "file2.txt"], "models") - - for item in dir_content: - if item.endswith("/"): - item_dir = tmp_path / item - item_dir.mkdir() - else: - item_file = tmp_path / item - item_file.touch() - - result = path_checker(tmp_path) - expected = ( - None if expected_result is None else BackendInfo(tmp_path / expected_result) - ) - - assert result == expected - - -@pytest.mark.parametrize("system_config", [None, "system_config"]) -@pytest.mark.parametrize("copy_source", [True, False]) -def test_static_path_checker( - tmp_path: Path, copy_source: bool, system_config: str | None -) -> None: - """Test static path checker.""" - static_checker = StaticPathChecker( - tmp_path, [], copy_source=copy_source, system_config=system_config - ) - assert static_checker(tmp_path) == BackendInfo( - tmp_path, copy_source=copy_source, system_config=system_config - ) - - -def test_static_path_checker_not_valid_path(tmp_path: Path) -> None: - """Test static path checker should return None if path is not valid.""" - static_checker = StaticPathChecker(tmp_path, ["file.txt"]) - assert static_checker(tmp_path / "backend") is None - - -def test_static_path_checker_not_valid_structure(tmp_path: Path) -> None: - """Test static path checker should return None if files are missing.""" - static_checker = StaticPathChecker(tmp_path, ["file.txt"]) - assert static_checker(tmp_path) is None - - missing_file = tmp_path / "file.txt" - missing_file.touch() - - assert static_checker(tmp_path) == BackendInfo(tmp_path, copy_source=False) - - -def test_compound_path_checker(tmp_path: Path) -> None: - """Test compound path checker.""" - 
path_checker_path_valid_path = MagicMock(return_value=BackendInfo(tmp_path)) - path_checker_path_not_valid_path = MagicMock(return_value=None) - - checker = CompoundPathChecker( - path_checker_path_valid_path, path_checker_path_not_valid_path - ) - assert checker(tmp_path) == BackendInfo(tmp_path) - - checker = CompoundPathChecker(path_checker_path_not_valid_path) - assert checker(tmp_path) is None - - -@pytest.mark.parametrize( - "eula_agreement, expected_command", - [ - [ - True, - [ - "./FVP_Corstone_SSE-300.sh", - "-q", - "-d", - "corstone-300", - ], - ], - [ - False, - [ - "./FVP_Corstone_SSE-300.sh", - "-q", - "-d", - "corstone-300", - "--nointeractive", - "--i-agree-to-the-contained-eula", - ], - ], - ], -) -def test_corstone_300_installer( - tmp_path: Path, - monkeypatch: pytest.MonkeyPatch, - eula_agreement: bool, - expected_command: list[str], -) -> None: - """Test Corstone-300 installer.""" - command_mock = MagicMock() - - monkeypatch.setattr( - "mlia.tools.metadata.corstone.subprocess.check_call", command_mock - ) - installer = Corstone300Installer() - result = installer(eula_agreement, tmp_path) - - command_mock.assert_called_once_with(expected_command) - assert result == tmp_path / "corstone-300" - - -@pytest.mark.parametrize( - "corstone_installation, expected_paths", - [ - [ - get_corstone_300_installation(), - { - "/opt/VHT/VHT_Corstone_SSE-300_Ethos-U55", - "/opt/VHT/VHT_Corstone_SSE-300_Ethos-U65", - }, - ], - [ - get_corstone_310_installation(), - { - "/opt/VHT/VHT_Corstone_SSE-310", - "/opt/VHT/VHT_Corstone_SSE-310_Ethos-U65", - }, - ], - ], -) -def test_corstone_vht_install( - corstone_installation: BackendInstallation, - expected_paths: set, - monkeypatch: pytest.MonkeyPatch, -) -> None: - """Test if Corstone 300/310 could be installed from /opt/VHT.""" - - def _all_files_exist(paths: Iterable[Path]) -> bool: - """Check if all files exist.""" - pathset = {item.as_posix() for item in paths} - return pathset == expected_paths - - 
create_destination_and_install_mock = MagicMock() - - monkeypatch.setattr( - "mlia.tools.metadata.corstone.all_files_exist", _all_files_exist - ) - - monkeypatch.setattr("mlia.backend.system.get_available_systems", lambda: []) - - monkeypatch.setattr( - "mlia.backend.system.create_destination_and_install", - create_destination_and_install_mock, - ) - - corstone_installation.install(InstallFromPath(Path("/opt/VHT"))) - - create_destination_and_install_mock.assert_called_once() - - -def test_corstone_uninstall( - monkeypatch: pytest.MonkeyPatch, -) -> None: - """Test the uninstall function in Corstone.""" - remove_system_mock = MagicMock() - - monkeypatch.setattr( - "mlia.tools.metadata.corstone.remove_system", - remove_system_mock, - ) - - installation = get_corstone_300_installation() - - installation.uninstall() - remove_system_mock.assert_called_once_with("corstone_300") diff --git a/tests/test_tools_metadata_py_package.py b/tests/test_tools_metadata_py_package.py deleted file mode 100644 index 8b93e33..0000000 --- a/tests/test_tools_metadata_py_package.py +++ /dev/null @@ -1,62 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. 
-# SPDX-License-Identifier: Apache-2.0 -"""Tests for python package based installations.""" -from pathlib import Path -from unittest.mock import MagicMock - -import pytest - -from mlia.tools.metadata.common import DownloadAndInstall -from mlia.tools.metadata.common import InstallFromPath -from mlia.tools.metadata.py_package import get_pypackage_backend_installations -from mlia.tools.metadata.py_package import get_tosa_backend_installation -from mlia.tools.metadata.py_package import PyPackageBackendInstallation - - -def test_get_pypackage_backends() -> None: - """Test function get_pypackage_backends.""" - backend_installs = get_pypackage_backend_installations() - - assert isinstance(backend_installs, list) - assert len(backend_installs) == 1 - - tosa_installation = backend_installs[0] - assert isinstance(tosa_installation, PyPackageBackendInstallation) - - -def test_get_tosa_backend_installation( - tmp_path: Path, monkeypatch: pytest.MonkeyPatch -) -> None: - """Test function get_tosa_backend_installation.""" - mock_package_manager = MagicMock() - monkeypatch.setattr( - "mlia.tools.metadata.py_package.get_package_manager", - lambda: mock_package_manager, - ) - - tosa_installation = get_tosa_backend_installation() - - assert isinstance(tosa_installation, PyPackageBackendInstallation) - assert tosa_installation.name == "tosa-checker" - assert ( - tosa_installation.description - == "Tool to check if a ML model is compatible with the TOSA specification" - ) - assert tosa_installation.could_be_installed - assert tosa_installation.supports(DownloadAndInstall()) - assert not tosa_installation.supports(InstallFromPath(tmp_path)) - - mock_package_manager.packages_installed.return_value = True - assert tosa_installation.already_installed - mock_package_manager.packages_installed.assert_called_once_with(["tosa-checker"]) - - with pytest.raises(Exception, match=r"Unsupported installation type.*"): - tosa_installation.install(InstallFromPath(tmp_path)) - - 
mock_package_manager.install.assert_not_called() - - tosa_installation.install(DownloadAndInstall()) - mock_package_manager.install.assert_called_once_with(["mlia[tosa]"]) - - tosa_installation.uninstall() - mock_package_manager.uninstall.assert_called_once_with(["tosa-checker"]) diff --git a/tests/test_tools_vela_wrapper.py b/tests/test_tools_vela_wrapper.py deleted file mode 100644 index 0efcb0f..0000000 --- a/tests/test_tools_vela_wrapper.py +++ /dev/null @@ -1,285 +0,0 @@ -# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates. -# SPDX-License-Identifier: Apache-2.0 -"""Tests for module tools/vela_wrapper.""" -from pathlib import Path -from unittest.mock import MagicMock - -import pytest -from ethosu.vela.compiler_driver import TensorAllocator -from ethosu.vela.scheduler import OptimizationStrategy - -from mlia.devices.ethosu.config import EthosUConfiguration -from mlia.tools.vela_wrapper import estimate_performance -from mlia.tools.vela_wrapper import generate_supported_operators_report -from mlia.tools.vela_wrapper import NpuSupported -from mlia.tools.vela_wrapper import Operator -from mlia.tools.vela_wrapper import Operators -from mlia.tools.vela_wrapper import optimize_model -from mlia.tools.vela_wrapper import OptimizedModel -from mlia.tools.vela_wrapper import PerformanceMetrics -from mlia.tools.vela_wrapper import supported_operators -from mlia.tools.vela_wrapper import VelaCompiler -from mlia.tools.vela_wrapper import VelaCompilerOptions -from mlia.utils.filesystem import working_directory - - -def test_default_vela_compiler() -> None: - """Test default Vela compiler instance.""" - default_compiler_options = VelaCompilerOptions(accelerator_config="ethos-u55-256") - default_compiler = VelaCompiler(default_compiler_options) - - assert default_compiler.config_files is None - assert default_compiler.system_config == "internal-default" - assert default_compiler.memory_mode == "internal-default" - assert 
default_compiler.accelerator_config == "ethos-u55-256" - assert default_compiler.max_block_dependency == 3 - assert default_compiler.arena_cache_size is None - assert default_compiler.tensor_allocator == TensorAllocator.HillClimb - assert default_compiler.cpu_tensor_alignment == 16 - assert default_compiler.optimization_strategy == OptimizationStrategy.Performance - assert default_compiler.output_dir is None - - assert default_compiler.get_config() == { - "accelerator_config": "ethos-u55-256", - "system_config": "internal-default", - "core_clock": 500000000.0, - "axi0_port": "Sram", - "axi1_port": "OffChipFlash", - "memory_mode": "internal-default", - "const_mem_area": "Axi1", - "arena_mem_area": "Axi0", - "cache_mem_area": "Axi0", - "arena_cache_size": 4294967296, - "permanent_storage_mem_area": "OffChipFlash", - "feature_map_storage_mem_area": "Sram", - "fast_storage_mem_area": "Sram", - "memory_area": { - "Sram": { - "clock_scales": 1.0, - "burst_length": 32, - "read_latency": 32, - "write_latency": 32, - }, - "Dram": { - "clock_scales": 1.0, - "burst_length": 1, - "read_latency": 0, - "write_latency": 0, - }, - "OnChipFlash": { - "clock_scales": 1.0, - "burst_length": 1, - "read_latency": 0, - "write_latency": 0, - }, - "OffChipFlash": { - "clock_scales": 0.125, - "burst_length": 128, - "read_latency": 64, - "write_latency": 64, - }, - }, - } - - -def test_vela_compiler_with_parameters(test_resources_path: Path) -> None: - """Test creation of Vela compiler instance with non-default params.""" - vela_ini_path = str(test_resources_path / "vela/sample_vela.ini") - - compiler_options = VelaCompilerOptions( - config_files=vela_ini_path, - system_config="Ethos_U65_High_End", - memory_mode="Shared_Sram", - accelerator_config="ethos-u65-256", - max_block_dependency=1, - arena_cache_size=10, - tensor_allocator="Greedy", - cpu_tensor_alignment=4, - optimization_strategy="Size", - output_dir="output", - ) - compiler = VelaCompiler(compiler_options) - - assert 
compiler.config_files == vela_ini_path - assert compiler.system_config == "Ethos_U65_High_End" - assert compiler.memory_mode == "Shared_Sram" - assert compiler.accelerator_config == "ethos-u65-256" - assert compiler.max_block_dependency == 1 - assert compiler.arena_cache_size == 10 - assert compiler.tensor_allocator == TensorAllocator.Greedy - assert compiler.cpu_tensor_alignment == 4 - assert compiler.optimization_strategy == OptimizationStrategy.Size - assert compiler.output_dir == "output" - - assert compiler.get_config() == { - "accelerator_config": "ethos-u65-256", - "system_config": "Ethos_U65_High_End", - "core_clock": 1000000000.0, - "axi0_port": "Sram", - "axi1_port": "Dram", - "memory_mode": "Shared_Sram", - "const_mem_area": "Axi1", - "arena_mem_area": "Axi0", - "cache_mem_area": "Axi0", - "arena_cache_size": 10, - "permanent_storage_mem_area": "Dram", - "feature_map_storage_mem_area": "Sram", - "fast_storage_mem_area": "Sram", - "memory_area": { - "Sram": { - "clock_scales": 1.0, - "burst_length": 32, - "read_latency": 32, - "write_latency": 32, - }, - "Dram": { - "clock_scales": 0.234375, - "burst_length": 128, - "read_latency": 500, - "write_latency": 250, - }, - "OnChipFlash": { - "clock_scales": 1.0, - "burst_length": 1, - "read_latency": 0, - "write_latency": 0, - }, - "OffChipFlash": { - "clock_scales": 1.0, - "burst_length": 1, - "read_latency": 0, - "write_latency": 0, - }, - }, - } - - -def test_compile_model(test_tflite_model: Path) -> None: - """Test model optimization.""" - compiler = VelaCompiler(EthosUConfiguration("ethos-u55-256").compiler_options) - - optimized_model = compiler.compile_model(test_tflite_model) - assert isinstance(optimized_model, OptimizedModel) - - -def test_optimize_model(tmp_path: Path, test_tflite_model: Path) -> None: - """Test model optimization and saving into file.""" - tmp_file = tmp_path / "temp.tflite" - - device = EthosUConfiguration("ethos-u55-256") - optimize_model(test_tflite_model, 
device.compiler_options, tmp_file.absolute()) - - assert tmp_file.is_file() - assert tmp_file.stat().st_size > 0 - - -@pytest.mark.parametrize( - "model, expected_ops", - [ - ( - "test_model.tflite", - Operators( - ops=[ - Operator( - name="sequential/conv1/Relu;sequential/conv1/BiasAdd;" - "sequential/conv2/Conv2D;sequential/conv1/Conv2D", - op_type="CONV_2D", - run_on_npu=NpuSupported(supported=True, reasons=[]), - ), - Operator( - name="sequential/conv2/Relu;sequential/conv2/BiasAdd;" - "sequential/conv2/Conv2D", - op_type="CONV_2D", - run_on_npu=NpuSupported(supported=True, reasons=[]), - ), - Operator( - name="sequential/max_pooling2d/MaxPool", - op_type="MAX_POOL_2D", - run_on_npu=NpuSupported(supported=True, reasons=[]), - ), - Operator( - name="sequential/flatten/Reshape", - op_type="RESHAPE", - run_on_npu=NpuSupported(supported=True, reasons=[]), - ), - Operator( - name="Identity", - op_type="FULLY_CONNECTED", - run_on_npu=NpuSupported(supported=True, reasons=[]), - ), - ] - ), - ) - ], -) -def test_operators(test_models_path: Path, model: str, expected_ops: Operators) -> None: - """Test operators function.""" - device = EthosUConfiguration("ethos-u55-256") - - operators = supported_operators(test_models_path / model, device.compiler_options) - for expected, actual in zip(expected_ops.ops, operators.ops): - # do not compare names as they could be different on each model generation - assert expected.op_type == actual.op_type - assert expected.run_on_npu == actual.run_on_npu - - -def test_estimate_performance(test_tflite_model: Path) -> None: - """Test getting performance estimations.""" - device = EthosUConfiguration("ethos-u55-256") - perf_metrics = estimate_performance(test_tflite_model, device.compiler_options) - - assert isinstance(perf_metrics, PerformanceMetrics) - - -def test_estimate_performance_already_optimized( - tmp_path: Path, test_tflite_model: Path -) -> None: - """Test that performance estimation should fail for already optimized model.""" - 
device = EthosUConfiguration("ethos-u55-256") - - optimized_model_path = tmp_path / "optimized_model.tflite" - - optimize_model(test_tflite_model, device.compiler_options, optimized_model_path) - - with pytest.raises( - Exception, match="Unable to estimate performance for the given optimized model" - ): - estimate_performance(optimized_model_path, device.compiler_options) - - -def test_generate_supported_operators_report(tmp_path: Path) -> None: - """Test generating supported operators report.""" - with working_directory(tmp_path): - generate_supported_operators_report() - - md_file = tmp_path / "SUPPORTED_OPS.md" - assert md_file.is_file() - assert md_file.stat().st_size > 0 - - -def test_read_invalid_model(test_tflite_invalid_model: Path) -> None: - """Test that reading invalid model should fail with exception.""" - with pytest.raises( - Exception, match=f"Unable to read model {test_tflite_invalid_model}" - ): - device = EthosUConfiguration("ethos-u55-256") - estimate_performance(test_tflite_invalid_model, device.compiler_options) - - -def test_compile_invalid_model( - test_tflite_model: Path, monkeypatch: pytest.MonkeyPatch, tmp_path: Path -) -> None: - """Test that if model could not be compiled then correct exception raised.""" - mock_compiler = MagicMock() - mock_compiler.side_effect = Exception("Bad model!") - - monkeypatch.setattr("mlia.tools.vela_wrapper.compiler_driver", mock_compiler) - - model_path = tmp_path / "optimized_model.tflite" - with pytest.raises( - Exception, match="Model could not be optimized with Vela compiler" - ): - device = EthosUConfiguration("ethos-u55-256") - optimize_model(test_tflite_model, device.compiler_options, model_path) - - assert not model_path.exists() -- cgit v1.2.1