aboutsummaryrefslogtreecommitdiff
path: root/tests/mlia
diff options
context:
space:
mode:
Diffstat (limited to 'tests/mlia')
-rw-r--r--tests/mlia/conftest.py91
-rw-r--r--tests/mlia/test_backend_application.py460
-rw-r--r--tests/mlia/test_backend_common.py486
-rw-r--r--tests/mlia/test_backend_controller.py160
-rw-r--r--tests/mlia/test_backend_execution.py518
-rw-r--r--tests/mlia/test_backend_fs.py168
-rw-r--r--tests/mlia/test_backend_manager.py (renamed from tests/mlia/test_tools_aiet_wrapper.py)326
-rw-r--r--tests/mlia/test_backend_output_parser.py152
-rw-r--r--tests/mlia/test_backend_proc.py272
-rw-r--r--tests/mlia/test_backend_protocol.py231
-rw-r--r--tests/mlia/test_backend_source.py203
-rw-r--r--tests/mlia/test_backend_system.py541
-rw-r--r--tests/mlia/test_cli_logging.py10
-rw-r--r--tests/mlia/test_devices_ethosu_performance.py2
-rw-r--r--tests/mlia/test_resources/application_config.json96
-rw-r--r--tests/mlia/test_resources/application_config.json.license3
-rw-r--r--tests/mlia/test_resources/backends/applications/application1/aiet-config.json30
-rw-r--r--tests/mlia/test_resources/backends/applications/application1/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/backends/applications/application2/aiet-config.json30
-rw-r--r--tests/mlia/test_resources/backends/applications/application2/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/backends/applications/application3/readme.txt4
-rw-r--r--tests/mlia/test_resources/backends/applications/application4/aiet-config.json36
-rw-r--r--tests/mlia/test_resources/backends/applications/application4/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/backends/applications/application4/hello_app.txt4
-rw-r--r--tests/mlia/test_resources/backends/applications/application5/aiet-config.json160
-rw-r--r--tests/mlia/test_resources/backends/applications/application5/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/backends/applications/application6/aiet-config.json42
-rw-r--r--tests/mlia/test_resources/backends/applications/application6/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/backends/applications/readme.txt4
-rw-r--r--tests/mlia/test_resources/backends/systems/system1/aiet-config.json35
-rw-r--r--tests/mlia/test_resources/backends/systems/system1/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/backends/systems/system1/system_artifact/dummy.txt2
-rw-r--r--tests/mlia/test_resources/backends/systems/system2/aiet-config.json32
-rw-r--r--tests/mlia/test_resources/backends/systems/system2/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/backends/systems/system3/readme.txt4
-rw-r--r--tests/mlia/test_resources/backends/systems/system4/aiet-config.json19
-rw-r--r--tests/mlia/test_resources/backends/systems/system4/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/backends/systems/system6/aiet-config.json34
-rw-r--r--tests/mlia/test_resources/backends/systems/system6/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/hello_world.json54
-rw-r--r--tests/mlia/test_resources/hello_world.json.license3
-rwxr-xr-xtests/mlia/test_resources/scripts/test_backend_run8
-rw-r--r--tests/mlia/test_resources/scripts/test_backend_run_script.sh8
-rw-r--r--tests/mlia/test_resources/various/applications/application_with_empty_config/aiet-config.json1
-rw-r--r--tests/mlia/test_resources/various/applications/application_with_empty_config/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/various/applications/application_with_valid_config/aiet-config.json35
-rw-r--r--tests/mlia/test_resources/various/applications/application_with_valid_config/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/various/applications/application_with_wrong_config1/aiet-config.json2
-rw-r--r--tests/mlia/test_resources/various/applications/application_with_wrong_config1/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/various/applications/application_with_wrong_config2/aiet-config.json30
-rw-r--r--tests/mlia/test_resources/various/applications/application_with_wrong_config2/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/various/applications/application_with_wrong_config3/aiet-config.json35
-rw-r--r--tests/mlia/test_resources/various/applications/application_with_wrong_config3/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/various/systems/system_with_empty_config/aiet-config.json1
-rw-r--r--tests/mlia/test_resources/various/systems/system_with_empty_config/aiet-config.json.license3
-rw-r--r--tests/mlia/test_resources/various/systems/system_with_valid_config/aiet-config.json16
-rw-r--r--tests/mlia/test_resources/various/systems/system_with_valid_config/aiet-config.json.license3
-rw-r--r--tests/mlia/test_tools_metadata_corstone.py90
58 files changed, 4286 insertions, 200 deletions
diff --git a/tests/mlia/conftest.py b/tests/mlia/conftest.py
index f683fca..0b4b2aa 100644
--- a/tests/mlia/conftest.py
+++ b/tests/mlia/conftest.py
@@ -1,7 +1,10 @@
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Pytest conf module."""
+import shutil
+import tarfile
from pathlib import Path
+from typing import Any
import pytest
@@ -18,3 +21,91 @@ def fixture_test_resources_path() -> Path:
def fixture_dummy_context(tmpdir: str) -> ExecutionContext:
"""Return dummy context fixture."""
return ExecutionContext(working_dir=tmpdir)
+
+
+@pytest.fixture(scope="session")
+def test_systems_path(test_resources_path: Path) -> Path:
+ """Return test systems path in a pytest fixture."""
+ return test_resources_path / "backends" / "systems"
+
+
+@pytest.fixture(scope="session")
+def test_applications_path(test_resources_path: Path) -> Path:
+ """Return test applications path in a pytest fixture."""
+ return test_resources_path / "backends" / "applications"
+
+
+@pytest.fixture(scope="session")
+def non_optimised_input_model_file(test_tflite_model: Path) -> Path:
+ """Provide the path to a quantized dummy model file."""
+ return test_tflite_model
+
+
+@pytest.fixture(scope="session")
+def optimised_input_model_file(test_tflite_vela_model: Path) -> Path:
+ """Provide path to Vela-optimised dummy model file."""
+ return test_tflite_vela_model
+
+
+@pytest.fixture(scope="session")
+def invalid_input_model_file(test_tflite_invalid_model: Path) -> Path:
+ """Provide the path to an invalid dummy model file."""
+ return test_tflite_invalid_model
+
+
+@pytest.fixture(autouse=True)
+def test_resources(monkeypatch: pytest.MonkeyPatch, test_resources_path: Path) -> Any:
+ """Force using test resources as middleware's repository."""
+
+ def get_test_resources() -> Path:
+ """Return path to the test resources."""
+ return test_resources_path / "backends"
+
+ monkeypatch.setattr("mlia.backend.fs.get_backend_resources", get_test_resources)
+ yield
+
+
+def create_archive(
+ archive_name: str, source: Path, destination: Path, with_root_folder: bool = False
+) -> None:
+ """Create archive from directory source."""
+ with tarfile.open(destination / archive_name, mode="w:gz") as tar:
+ for item in source.iterdir():
+ item_name = item.name
+ if with_root_folder:
+ item_name = f"{source.name}/{item_name}"
+ tar.add(item, item_name)
+
+
+def process_directory(source: Path, destination: Path) -> None:
+ """Process resource directory."""
+ destination.mkdir()
+
+ for item in source.iterdir():
+ if item.is_dir():
+ create_archive(f"{item.name}.tar.gz", item, destination)
+ create_archive(f"{item.name}_dir.tar.gz", item, destination, True)
+
+
+@pytest.fixture(scope="session", autouse=True)
+def add_archives(
+ test_resources_path: Path, tmp_path_factory: pytest.TempPathFactory
+) -> Any:
+ """Generate archives of the test resources."""
+ tmp_path = tmp_path_factory.mktemp("archives")
+
+ archives_path = tmp_path / "archives"
+ archives_path.mkdir()
+
+ if (archives_path_link := test_resources_path / "archives").is_symlink():
+ archives_path_link.unlink()
+
+ archives_path_link.symlink_to(archives_path, target_is_directory=True)
+
+ for item in ["applications", "systems"]:
+ process_directory(test_resources_path / "backends" / item, archives_path / item)
+
+ yield
+
+ archives_path_link.unlink()
+ shutil.rmtree(tmp_path)
diff --git a/tests/mlia/test_backend_application.py b/tests/mlia/test_backend_application.py
new file mode 100644
index 0000000..2cfb2ef
--- /dev/null
+++ b/tests/mlia/test_backend_application.py
@@ -0,0 +1,460 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+# pylint: disable=no-self-use
+"""Tests for the application backend."""
+from collections import Counter
+from contextlib import ExitStack as does_not_raise
+from pathlib import Path
+from typing import Any
+from typing import List
+from unittest.mock import MagicMock
+
+import pytest
+
+from mlia.backend.application import Application
+from mlia.backend.application import get_application
+from mlia.backend.application import get_available_application_directory_names
+from mlia.backend.application import get_available_applications
+from mlia.backend.application import get_unique_application_names
+from mlia.backend.application import install_application
+from mlia.backend.application import load_applications
+from mlia.backend.application import remove_application
+from mlia.backend.common import Command
+from mlia.backend.common import DataPaths
+from mlia.backend.common import Param
+from mlia.backend.common import UserParamConfig
+from mlia.backend.config import ApplicationConfig
+from mlia.backend.config import ExtendedApplicationConfig
+from mlia.backend.config import NamedExecutionConfig
+
+
+def test_get_available_application_directory_names() -> None:
+ """Test get_available_applicationss mocking get_resources."""
+ directory_names = get_available_application_directory_names()
+ assert Counter(directory_names) == Counter(
+ [
+ "application1",
+ "application2",
+ "application4",
+ "application5",
+ "application6",
+ ]
+ )
+
+
+def test_get_available_applications() -> None:
+ """Test get_available_applicationss mocking get_resources."""
+ available_applications = get_available_applications()
+
+ assert all(isinstance(s, Application) for s in available_applications)
+ assert all(s != 42 for s in available_applications)
+ assert len(available_applications) == 10
+ # application_5 has multiply items with multiply supported systems
+ assert [str(s) for s in available_applications] == [
+ "application_1",
+ "application_2",
+ "application_4",
+ "application_5",
+ "application_5",
+ "application_5A",
+ "application_5A",
+ "application_5B",
+ "application_5B",
+ "application_6",
+ ]
+
+
+def test_get_unique_application_names() -> None:
+ """Test get_unique_application_names."""
+ unique_names = get_unique_application_names()
+
+ assert all(isinstance(s, str) for s in unique_names)
+ assert all(s for s in unique_names)
+ assert sorted(unique_names) == [
+ "application_1",
+ "application_2",
+ "application_4",
+ "application_5",
+ "application_5A",
+ "application_5B",
+ "application_6",
+ ]
+
+
+def test_get_application() -> None:
+ """Test get_application mocking get_resoures."""
+ application = get_application("application_1")
+ if len(application) != 1:
+ pytest.fail("Unable to get application")
+ assert application[0].name == "application_1"
+
+ application = get_application("unknown application")
+ assert len(application) == 0
+
+
+@pytest.mark.parametrize(
+ "source, call_count, expected_exception",
+ (
+ (
+ "archives/applications/application1.tar.gz",
+ 0,
+ pytest.raises(
+ Exception, match=r"Applications \[application_1\] are already installed"
+ ),
+ ),
+ (
+ "various/applications/application_with_empty_config",
+ 0,
+ pytest.raises(Exception, match="No application definition found"),
+ ),
+ (
+ "various/applications/application_with_wrong_config1",
+ 0,
+ pytest.raises(Exception, match="Unable to read application definition"),
+ ),
+ (
+ "various/applications/application_with_wrong_config2",
+ 0,
+ pytest.raises(Exception, match="Unable to read application definition"),
+ ),
+ (
+ "various/applications/application_with_wrong_config3",
+ 0,
+ pytest.raises(Exception, match="Unable to read application definition"),
+ ),
+ ("various/applications/application_with_valid_config", 1, does_not_raise()),
+ (
+ "archives/applications/application3.tar.gz",
+ 0,
+ pytest.raises(Exception, match="Unable to read application definition"),
+ ),
+ (
+ "backends/applications/application1",
+ 0,
+ pytest.raises(
+ Exception, match=r"Applications \[application_1\] are already installed"
+ ),
+ ),
+ (
+ "backends/applications/application3",
+ 0,
+ pytest.raises(Exception, match="Unable to read application definition"),
+ ),
+ ),
+)
+def test_install_application(
+ monkeypatch: Any,
+ test_resources_path: Path,
+ source: str,
+ call_count: int,
+ expected_exception: Any,
+) -> None:
+ """Test application install from archive."""
+ mock_create_destination_and_install = MagicMock()
+ monkeypatch.setattr(
+ "mlia.backend.application.create_destination_and_install",
+ mock_create_destination_and_install,
+ )
+
+ with expected_exception:
+ install_application(test_resources_path / source)
+ assert mock_create_destination_and_install.call_count == call_count
+
+
+def test_remove_application(monkeypatch: Any) -> None:
+ """Test application removal."""
+ mock_remove_backend = MagicMock()
+ monkeypatch.setattr("mlia.backend.application.remove_backend", mock_remove_backend)
+
+ remove_application("some_application_directory")
+ mock_remove_backend.assert_called_once()
+
+
+def test_application_config_without_commands() -> None:
+ """Test application config without commands."""
+ config = ApplicationConfig(name="application")
+ application = Application(config)
+ # pylint: disable=use-implicit-booleaness-not-comparison
+ assert application.commands == {}
+
+
+class TestApplication:
+ """Test for application class methods."""
+
+ def test___eq__(self) -> None:
+ """Test overloaded __eq__ method."""
+ config = ApplicationConfig(
+ # Application
+ supported_systems=["system1", "system2"],
+ build_dir="build_dir",
+ # inherited from Backend
+ name="name",
+ description="description",
+ commands={},
+ )
+ application1 = Application(config)
+ application2 = Application(config) # Identical
+ assert application1 == application2
+
+ application3 = Application(config) # changed
+ # Change one single attribute so not equal, but same Type
+ setattr(application3, "supported_systems", ["somewhere/else"])
+ assert application1 != application3
+
+ # different Type
+ application4 = "Not the Application you are looking for"
+ assert application1 != application4
+
+ application5 = Application(config)
+ # supported systems could be in any order
+ setattr(application5, "supported_systems", ["system2", "system1"])
+ assert application1 == application5
+
+ def test_can_run_on(self) -> None:
+ """Test Application can run on."""
+ config = ApplicationConfig(name="application", supported_systems=["System-A"])
+
+ application = Application(config)
+ assert application.can_run_on("System-A")
+ assert not application.can_run_on("System-B")
+
+ applications = get_application("application_1", "System 1")
+ assert len(applications) == 1
+ assert applications[0].can_run_on("System 1")
+
+ def test_get_deploy_data(self, tmp_path: Path) -> None:
+ """Test Application can run on."""
+ src, dest = "src", "dest"
+ config = ApplicationConfig(
+ name="application", deploy_data=[(src, dest)], config_location=tmp_path
+ )
+ src_path = tmp_path / src
+ src_path.mkdir()
+ application = Application(config)
+ assert application.get_deploy_data() == [DataPaths(src_path, dest)]
+
+ def test_get_deploy_data_no_config_location(self) -> None:
+ """Test that getting deploy data fails if no config location provided."""
+ with pytest.raises(
+ Exception, match="Unable to get application .* config location"
+ ):
+ Application(ApplicationConfig(name="application")).get_deploy_data()
+
+ def test_unable_to_create_application_without_name(self) -> None:
+ """Test that it is not possible to create application without name."""
+ with pytest.raises(Exception, match="Name is empty"):
+ Application(ApplicationConfig())
+
+ def test_application_config_without_commands(self) -> None:
+ """Test application config without commands."""
+ config = ApplicationConfig(name="application")
+ application = Application(config)
+ # pylint: disable=use-implicit-booleaness-not-comparison
+ assert application.commands == {}
+
+ @pytest.mark.parametrize(
+ "config, expected_params",
+ (
+ (
+ ApplicationConfig(
+ name="application",
+ commands={"command": ["cmd {user_params:0} {user_params:1}"]},
+ user_params={
+ "command": [
+ UserParamConfig(
+ name="--param1", description="param1", alias="param1"
+ ),
+ UserParamConfig(
+ name="--param2", description="param2", alias="param2"
+ ),
+ ]
+ },
+ ),
+ [Param("--param1", "param1"), Param("--param2", "param2")],
+ ),
+ (
+ ApplicationConfig(
+ name="application",
+ commands={"command": ["cmd {user_params:param1} {user_params:1}"]},
+ user_params={
+ "command": [
+ UserParamConfig(
+ name="--param1", description="param1", alias="param1"
+ ),
+ UserParamConfig(
+ name="--param2", description="param2", alias="param2"
+ ),
+ ]
+ },
+ ),
+ [Param("--param1", "param1"), Param("--param2", "param2")],
+ ),
+ (
+ ApplicationConfig(
+ name="application",
+ commands={"command": ["cmd {user_params:param1}"]},
+ user_params={
+ "command": [
+ UserParamConfig(
+ name="--param1", description="param1", alias="param1"
+ ),
+ UserParamConfig(
+ name="--param2", description="param2", alias="param2"
+ ),
+ ]
+ },
+ ),
+ [Param("--param1", "param1")],
+ ),
+ ),
+ )
+ def test_remove_unused_params(
+ self, config: ApplicationConfig, expected_params: List[Param]
+ ) -> None:
+ """Test mod remove_unused_parameter."""
+ application = Application(config)
+ application.remove_unused_params()
+ assert application.commands["command"].params == expected_params
+
+
+@pytest.mark.parametrize(
+ "config, expected_error",
+ (
+ (
+ ExtendedApplicationConfig(name="application"),
+ pytest.raises(Exception, match="No supported systems definition provided"),
+ ),
+ (
+ ExtendedApplicationConfig(
+ name="application", supported_systems=[NamedExecutionConfig(name="")]
+ ),
+ pytest.raises(
+ Exception,
+ match="Unable to read supported system definition, name is missed",
+ ),
+ ),
+ (
+ ExtendedApplicationConfig(
+ name="application",
+ supported_systems=[
+ NamedExecutionConfig(
+ name="system",
+ commands={"command": ["cmd"]},
+ user_params={"command": [UserParamConfig(name="param")]},
+ )
+ ],
+ commands={"command": ["cmd {user_params:0}"]},
+ user_params={"command": [UserParamConfig(name="param")]},
+ ),
+ pytest.raises(
+ Exception, match="Default parameters for command .* should have aliases"
+ ),
+ ),
+ (
+ ExtendedApplicationConfig(
+ name="application",
+ supported_systems=[
+ NamedExecutionConfig(
+ name="system",
+ commands={"command": ["cmd"]},
+ user_params={"command": [UserParamConfig(name="param")]},
+ )
+ ],
+ commands={"command": ["cmd {user_params:0}"]},
+ user_params={"command": [UserParamConfig(name="param", alias="param")]},
+ ),
+ pytest.raises(
+ Exception, match="system parameters for command .* should have aliases"
+ ),
+ ),
+ ),
+)
+def test_load_application_exceptional_cases(
+ config: ExtendedApplicationConfig, expected_error: Any
+) -> None:
+ """Test exceptional cases for application load function."""
+ with expected_error:
+ load_applications(config)
+
+
+def test_load_application() -> None:
+ """Test application load function.
+
+ The main purpose of this test is to test configuration for application
+ for different systems. All configuration should be correctly
+ overridden if needed.
+ """
+ application_5 = get_application("application_5")
+ assert len(application_5) == 2
+
+ default_commands = {
+ "build": Command(["default build command"]),
+ "run": Command(["default run command"]),
+ }
+ default_variables = {"var1": "value1", "var2": "value2"}
+
+ application_5_0 = application_5[0]
+ assert application_5_0.build_dir == "default_build_dir"
+ assert application_5_0.supported_systems == ["System 1"]
+ assert application_5_0.commands == default_commands
+ assert application_5_0.variables == default_variables
+ assert application_5_0.lock is False
+
+ application_5_1 = application_5[1]
+ assert application_5_1.build_dir == application_5_0.build_dir
+ assert application_5_1.supported_systems == ["System 2"]
+ assert application_5_1.commands == application_5_1.commands
+ assert application_5_1.variables == default_variables
+
+ application_5a = get_application("application_5A")
+ assert len(application_5a) == 2
+
+ application_5a_0 = application_5a[0]
+ assert application_5a_0.supported_systems == ["System 1"]
+ assert application_5a_0.build_dir == "build_5A"
+ assert application_5a_0.commands == default_commands
+ assert application_5a_0.variables == {"var1": "new value1", "var2": "value2"}
+ assert application_5a_0.lock is False
+
+ application_5a_1 = application_5a[1]
+ assert application_5a_1.supported_systems == ["System 2"]
+ assert application_5a_1.build_dir == "build"
+ assert application_5a_1.commands == {
+ "build": Command(["default build command"]),
+ "run": Command(["run command on system 2"]),
+ }
+ assert application_5a_1.variables == {"var1": "value1", "var2": "new value2"}
+ assert application_5a_1.lock is True
+
+ application_5b = get_application("application_5B")
+ assert len(application_5b) == 2
+
+ application_5b_0 = application_5b[0]
+ assert application_5b_0.build_dir == "build_5B"
+ assert application_5b_0.supported_systems == ["System 1"]
+ assert application_5b_0.commands == {
+ "build": Command(["default build command with value for var1 System1"], []),
+ "run": Command(["default run command with value for var2 System1"]),
+ }
+ assert "non_used_command" not in application_5b_0.commands
+
+ application_5b_1 = application_5b[1]
+ assert application_5b_1.build_dir == "build"
+ assert application_5b_1.supported_systems == ["System 2"]
+ assert application_5b_1.commands == {
+ "build": Command(
+ [
+ "build command on system 2 with value"
+ " for var1 System2 {user_params:param1}"
+ ],
+ [
+ Param(
+ "--param",
+ "Sample command param",
+ ["value1", "value2", "value3"],
+ "value1",
+ )
+ ],
+ ),
+ "run": Command(["run command on system 2"], []),
+ }
diff --git a/tests/mlia/test_backend_common.py b/tests/mlia/test_backend_common.py
new file mode 100644
index 0000000..82a985a
--- /dev/null
+++ b/tests/mlia/test_backend_common.py
@@ -0,0 +1,486 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+# pylint: disable=no-self-use,protected-access
+"""Tests for the common backend module."""
+from contextlib import ExitStack as does_not_raise
+from pathlib import Path
+from typing import Any
+from typing import cast
+from typing import Dict
+from typing import IO
+from typing import List
+from typing import Optional
+from typing import Tuple
+from typing import Union
+from unittest.mock import MagicMock
+
+import pytest
+
+from mlia.backend.application import Application
+from mlia.backend.common import Backend
+from mlia.backend.common import BaseBackendConfig
+from mlia.backend.common import Command
+from mlia.backend.common import ConfigurationException
+from mlia.backend.common import load_config
+from mlia.backend.common import Param
+from mlia.backend.common import parse_raw_parameter
+from mlia.backend.common import remove_backend
+from mlia.backend.config import ApplicationConfig
+from mlia.backend.config import UserParamConfig
+from mlia.backend.execution import ExecutionContext
+from mlia.backend.execution import ParamResolver
+from mlia.backend.system import System
+
+
+@pytest.mark.parametrize(
+ "directory_name, expected_exception",
+ (
+ ("some_dir", does_not_raise()),
+ (None, pytest.raises(Exception, match="No directory name provided")),
+ ),
+)
+def test_remove_backend(
+ monkeypatch: Any, directory_name: str, expected_exception: Any
+) -> None:
+ """Test remove_backend function."""
+ mock_remove_resource = MagicMock()
+ monkeypatch.setattr("mlia.backend.common.remove_resource", mock_remove_resource)
+
+ with expected_exception:
+ remove_backend(directory_name, "applications")
+
+
+@pytest.mark.parametrize(
+ "filename, expected_exception",
+ (
+ ("application_config.json", does_not_raise()),
+ (None, pytest.raises(Exception, match="Unable to read config")),
+ ),
+)
+def test_load_config(
+ filename: str, expected_exception: Any, test_resources_path: Path, monkeypatch: Any
+) -> None:
+ """Test load_config."""
+ with expected_exception:
+ configs: List[Optional[Union[Path, IO[bytes]]]] = (
+ [None]
+ if not filename
+ else [
+ # Ignore pylint warning as 'with' can't be used inside of a
+ # generator expression.
+ # pylint: disable=consider-using-with
+ open(test_resources_path / filename, "rb"),
+ test_resources_path / filename,
+ ]
+ )
+ for config in configs:
+ json_mock = MagicMock()
+ monkeypatch.setattr("mlia.backend.common.json.load", json_mock)
+ load_config(config)
+ json_mock.assert_called_once()
+
+
+class TestBackend:
+ """Test Backend class."""
+
+ def test___repr__(self) -> None:
+ """Test the representation of Backend instance."""
+ backend = Backend(
+ BaseBackendConfig(name="Testing name", description="Testing description")
+ )
+ assert str(backend) == "Testing name"
+
+ def test__eq__(self) -> None:
+ """Test equality method with different cases."""
+ backend1 = Backend(BaseBackendConfig(name="name", description="description"))
+ backend1.commands = {"command": Command(["command"])}
+
+ backend2 = Backend(BaseBackendConfig(name="name", description="description"))
+ backend2.commands = {"command": Command(["command"])}
+
+ backend3 = Backend(
+ BaseBackendConfig(
+ name="Ben", description="This is not the Backend you are looking for"
+ )
+ )
+ backend3.commands = {"wave": Command(["wave hand"])}
+
+ backend4 = "Foo" # checking not isinstance(backend4, Backend)
+
+ assert backend1 == backend2
+ assert backend1 != backend3
+ assert backend1 != backend4
+
+ @pytest.mark.parametrize(
+ "parameter, valid",
+ [
+ ("--choice-param dummy_value_1", True),
+ ("--choice-param wrong_value", False),
+ ("--open-param something", True),
+ ("--wrong-param value", False),
+ ],
+ )
+ def test_validate_parameter(
+ self, parameter: str, valid: bool, test_resources_path: Path
+ ) -> None:
+ """Test validate_parameter."""
+ config = cast(
+ List[ApplicationConfig],
+ load_config(test_resources_path / "hello_world.json"),
+ )
+ # The application configuration is a list of configurations so we need
+ # only the first one
+ # Exercise the validate_parameter test using the Application classe which
+ # inherits from Backend.
+ application = Application(config[0])
+ assert application.validate_parameter("run", parameter) == valid
+
+ def test_validate_parameter_with_invalid_command(
+ self, test_resources_path: Path
+ ) -> None:
+ """Test validate_parameter with an invalid command_name."""
+ config = cast(
+ List[ApplicationConfig],
+ load_config(test_resources_path / "hello_world.json"),
+ )
+ application = Application(config[0])
+ with pytest.raises(AttributeError) as err:
+ # command foo does not exist, so raise an error
+ application.validate_parameter("foo", "bar")
+ assert "Unknown command: 'foo'" in str(err.value)
+
+ def test_build_command(self, monkeypatch: Any) -> None:
+ """Test command building."""
+ config = {
+ "name": "test",
+ "commands": {
+ "build": ["build {user_params:0} {user_params:1}"],
+ "run": ["run {user_params:0}"],
+ "post_run": ["post_run {application_params:0} on {system_params:0}"],
+ "some_command": ["Command with {variables:var_A}"],
+ "empty_command": [""],
+ },
+ "user_params": {
+ "build": [
+ {
+ "name": "choice_param_0=",
+ "values": [1, 2, 3],
+ "default_value": 1,
+ },
+ {"name": "choice_param_1", "values": [3, 4, 5], "default_value": 3},
+ {"name": "choice_param_3", "values": [6, 7, 8]},
+ ],
+ "run": [{"name": "flag_param_0"}],
+ },
+ "variables": {"var_A": "value for variable A"},
+ }
+
+ monkeypatch.setattr("mlia.backend.system.ProtocolFactory", MagicMock())
+ application, system = Application(config), System(config) # type: ignore
+ context = ExecutionContext(
+ app=application,
+ app_params=[],
+ system=system,
+ system_params=[],
+ custom_deploy_data=[],
+ )
+
+ param_resolver = ParamResolver(context)
+
+ cmd = application.build_command(
+ "build", ["choice_param_0=2", "choice_param_1=4"], param_resolver
+ )
+ assert cmd == ["build choice_param_0=2 choice_param_1 4"]
+
+ cmd = application.build_command("build", ["choice_param_0=2"], param_resolver)
+ assert cmd == ["build choice_param_0=2 choice_param_1 3"]
+
+ cmd = application.build_command(
+ "build", ["choice_param_0=2", "choice_param_3=7"], param_resolver
+ )
+ assert cmd == ["build choice_param_0=2 choice_param_1 3"]
+
+ with pytest.raises(
+ ConfigurationException, match="Command 'foo' could not be found."
+ ):
+ application.build_command("foo", [""], param_resolver)
+
+ cmd = application.build_command("some_command", [], param_resolver)
+ assert cmd == ["Command with value for variable A"]
+
+ cmd = application.build_command("empty_command", [], param_resolver)
+ assert cmd == [""]
+
+ @pytest.mark.parametrize("class_", [Application, System])
+ def test_build_command_unknown_variable(self, class_: type) -> None:
+ """Test that unable to construct backend with unknown variable."""
+ with pytest.raises(Exception, match="Unknown variable var1"):
+ config = {"name": "test", "commands": {"run": ["run {variables:var1}"]}}
+ class_(config)
+
+ @pytest.mark.parametrize(
+ "class_, config, expected_output",
+ [
+ (
+ Application,
+ {
+ "name": "test",
+ "commands": {
+ "build": ["build {user_params:0} {user_params:1}"],
+ "run": ["run {user_params:0}"],
+ },
+ "user_params": {
+ "build": [
+ {
+ "name": "choice_param_0=",
+ "values": ["a", "b", "c"],
+ "default_value": "a",
+ "alias": "param_1",
+ },
+ {
+ "name": "choice_param_1",
+ "values": ["a", "b", "c"],
+ "default_value": "a",
+ "alias": "param_2",
+ },
+ {"name": "choice_param_3", "values": ["a", "b", "c"]},
+ ],
+ "run": [{"name": "flag_param_0"}],
+ },
+ },
+ [
+ (
+ "b",
+ Param(
+ name="choice_param_0=",
+ description="",
+ values=["a", "b", "c"],
+ default_value="a",
+ alias="param_1",
+ ),
+ ),
+ (
+ "a",
+ Param(
+ name="choice_param_1",
+ description="",
+ values=["a", "b", "c"],
+ default_value="a",
+ alias="param_2",
+ ),
+ ),
+ (
+ "c",
+ Param(
+ name="choice_param_3",
+ description="",
+ values=["a", "b", "c"],
+ ),
+ ),
+ ],
+ ),
+ (System, {"name": "test"}, []),
+ ],
+ )
+ def test_resolved_parameters(
+ self,
+ monkeypatch: Any,
+ class_: type,
+ config: Dict,
+ expected_output: List[Tuple[Optional[str], Param]],
+ ) -> None:
+ """Test command building."""
+ monkeypatch.setattr("mlia.backend.system.ProtocolFactory", MagicMock())
+ backend = class_(config)
+
+ params = backend.resolved_parameters(
+ "build", ["choice_param_0=b", "choice_param_3=c"]
+ )
+ assert params == expected_output
+
+ @pytest.mark.parametrize(
+ ["param_name", "user_param", "expected_value"],
+ [
+ (
+ "test_name",
+ "test_name=1234",
+ "1234",
+ ), # optional parameter using '='
+ (
+ "test_name",
+ "test_name 1234",
+ "1234",
+ ), # optional parameter using ' '
+ ("test_name", "test_name", None), # flag
+ (None, "test_name=1234", "1234"), # positional parameter
+ ],
+ )
+ def test_resolved_user_parameters(
+ self, param_name: str, user_param: str, expected_value: str
+ ) -> None:
+ """Test different variants to provide user parameters."""
+ # A dummy config providing one backend config
+ config = {
+ "name": "test_backend",
+ "commands": {
+ "test": ["user_param:test_param"],
+ },
+ "user_params": {
+ "test": [UserParamConfig(name=param_name, alias="test_name")],
+ },
+ }
+ backend = Backend(cast(BaseBackendConfig, config))
+ params = backend.resolved_parameters(
+ command_name="test", user_params=[user_param]
+ )
+ assert len(params) == 1
+ value, param = params[0]
+ assert param_name == param.name
+ assert expected_value == value
+
+ @pytest.mark.parametrize(
+ "input_param,expected",
+ [
+ ("--param=1", ("--param", "1")),
+ ("--param 1", ("--param", "1")),
+ ("--flag", ("--flag", None)),
+ ],
+ )
+ def test__parse_raw_parameter(
+ self, input_param: str, expected: Tuple[str, Optional[str]]
+ ) -> None:
+ """Test internal method of parsing a single raw parameter."""
+ assert parse_raw_parameter(input_param) == expected
+
+
+class TestParam:
+ """Test Param class."""
+
+ def test__eq__(self) -> None:
+ """Test equality method with different cases."""
+ param1 = Param(name="test", description="desc", values=["values"])
+ param2 = Param(name="test", description="desc", values=["values"])
+ param3 = Param(name="test1", description="desc", values=["values"])
+ param4 = object()
+
+ assert param1 == param2
+ assert param1 != param3
+ assert param1 != param4
+
+ def test_get_details(self) -> None:
+ """Test get_details() method."""
+ param1 = Param(name="test", description="desc", values=["values"])
+ assert param1.get_details() == {
+ "name": "test",
+ "values": ["values"],
+ "description": "desc",
+ }
+
+ def test_invalid(self) -> None:
+ """Test invalid use cases for the Param class."""
+ with pytest.raises(
+ ConfigurationException,
+ match="Either name, alias or both must be set to identify a parameter.",
+ ):
+ Param(name=None, description="desc", values=["values"])
+
+
+class TestCommand:
+ """Test Command class."""
+
+ def test_get_details(self) -> None:
+ """Test get_details() method."""
+ param1 = Param(name="test", description="desc", values=["values"])
+ command1 = Command(command_strings=["echo test"], params=[param1])
+ assert command1.get_details() == {
+ "command_strings": ["echo test"],
+ "user_params": [
+ {"name": "test", "values": ["values"], "description": "desc"}
+ ],
+ }
+
+ def test__eq__(self) -> None:
+ """Test equality method with different cases."""
+ param1 = Param("test", "desc", ["values"])
+ param2 = Param("test1", "desc1", ["values1"])
+ command1 = Command(command_strings=["echo test"], params=[param1])
+ command2 = Command(command_strings=["echo test"], params=[param1])
+ command3 = Command(command_strings=["echo test"])
+ command4 = Command(command_strings=["echo test"], params=[param2])
+ command5 = object()
+
+ assert command1 == command2
+ assert command1 != command3
+ assert command1 != command4
+ assert command1 != command5
+
+ @pytest.mark.parametrize(
+ "params, expected_error",
+ [
+ [[], does_not_raise()],
+ [[Param("param", "param description", [])], does_not_raise()],
+ [
+ [
+ Param("param", "param description", [], None, "alias"),
+ Param("param", "param description", [], None),
+ ],
+ does_not_raise(),
+ ],
+ [
+ [
+ Param("param1", "param1 description", [], None, "alias1"),
+ Param("param2", "param2 description", [], None, "alias2"),
+ ],
+ does_not_raise(),
+ ],
+ [
+ [
+ Param("param", "param description", [], None, "alias"),
+ Param("param", "param description", [], None, "alias"),
+ ],
+ pytest.raises(ConfigurationException, match="Non unique aliases alias"),
+ ],
+ [
+ [
+ Param("alias", "param description", [], None, "alias1"),
+ Param("param", "param description", [], None, "alias"),
+ ],
+ pytest.raises(
+ ConfigurationException,
+ match="Aliases .* could not be used as parameter name",
+ ),
+ ],
+ [
+ [
+ Param("alias", "param description", [], None, "alias"),
+ Param("param1", "param1 description", [], None, "alias1"),
+ ],
+ does_not_raise(),
+ ],
+ [
+ [
+ Param("alias", "param description", [], None, "alias"),
+ Param("alias", "param1 description", [], None, "alias1"),
+ ],
+ pytest.raises(
+ ConfigurationException,
+ match="Aliases .* could not be used as parameter name",
+ ),
+ ],
+ [
+ [
+ Param("param1", "param1 description", [], None, "alias1"),
+ Param("param2", "param2 description", [], None, "alias1"),
+ Param("param3", "param3 description", [], None, "alias2"),
+ Param("param4", "param4 description", [], None, "alias2"),
+ ],
+ pytest.raises(
+ ConfigurationException, match="Non unique aliases alias1, alias2"
+ ),
+ ],
+ ],
+ )
+ def test_validate_params(self, params: List[Param], expected_error: Any) -> None:
+ """Test command validation function."""
+ with expected_error:
+ Command([], params)
diff --git a/tests/mlia/test_backend_controller.py b/tests/mlia/test_backend_controller.py
new file mode 100644
index 0000000..a047adf
--- /dev/null
+++ b/tests/mlia/test_backend_controller.py
@@ -0,0 +1,160 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for system controller."""
+import csv
+import os
+import time
+from pathlib import Path
+from typing import Any
+
+import psutil
+import pytest
+
+from mlia.backend.common import ConfigurationException
+from mlia.backend.controller import SystemController
+from mlia.backend.controller import SystemControllerSingleInstance
+from mlia.backend.proc import ShellCommand
+
+
+def get_system_controller(**kwargs: Any) -> SystemController:
+ """Get service controller."""
+ single_instance = kwargs.get("single_instance", False)
+ if single_instance:
+ pid_file_path = kwargs.get("pid_file_path")
+ return SystemControllerSingleInstance(pid_file_path)
+
+ return SystemController()
+
+
+def test_service_controller() -> None:
+ """Test service controller functionality."""
+ service_controller = get_system_controller()
+
+ assert service_controller.get_output() == ("", "")
+ with pytest.raises(ConfigurationException, match="Wrong working directory"):
+ service_controller.start(["sleep 100"], Path("unknown"))
+
+ service_controller.start(["sleep 100"], Path.cwd())
+ assert service_controller.is_running()
+
+ service_controller.stop(True)
+ assert not service_controller.is_running()
+ assert service_controller.get_output() == ("", "")
+
+ service_controller.stop()
+
+ with pytest.raises(
+ ConfigurationException, match="System should have only one command to run"
+ ):
+ service_controller.start(["sleep 100", "sleep 101"], Path.cwd())
+
+ with pytest.raises(ConfigurationException, match="No startup command provided"):
+ service_controller.start([""], Path.cwd())
+
+
+def test_service_controller_bad_configuration() -> None:
+ """Test service controller functionality for bad configuration."""
+ with pytest.raises(Exception, match="No pid file path presented"):
+ service_controller = get_system_controller(
+ single_instance=True, pid_file_path=None
+ )
+ service_controller.start(["sleep 100"], Path.cwd())
+
+
+def test_service_controller_writes_process_info_correctly(tmpdir: Any) -> None:
+ """Test that controller writes process info correctly."""
+ pid_file = Path(tmpdir) / "test.pid"
+
+ service_controller = get_system_controller(
+ single_instance=True, pid_file_path=Path(tmpdir) / "test.pid"
+ )
+
+ service_controller.start(["sleep 100"], Path.cwd())
+ assert service_controller.is_running()
+ assert pid_file.is_file()
+
+ with open(pid_file, "r", encoding="utf-8") as file:
+ csv_reader = csv.reader(file)
+ rows = list(csv_reader)
+ assert len(rows) == 1
+
+ name, *_ = rows[0]
+ assert name == "sleep"
+
+ service_controller.stop()
+ assert pid_file.exists()
+
+
+def test_service_controller_does_not_write_process_info_if_process_finishes(
+ tmpdir: Any,
+) -> None:
+ """Test that controller does not write process info if process already finished."""
+ pid_file = Path(tmpdir) / "test.pid"
+ service_controller = get_system_controller(
+ single_instance=True, pid_file_path=pid_file
+ )
+ service_controller.is_running = lambda: False # type: ignore
+ service_controller.start(["echo hello"], Path.cwd())
+
+ assert not pid_file.exists()
+
+
+def test_service_controller_searches_for_previous_instances_correctly(
+ tmpdir: Any,
+) -> None:
+ """Test that controller searches for previous instances correctly."""
+ pid_file = Path(tmpdir) / "test.pid"
+ command = ShellCommand().run("sleep", "100")
+ assert command.is_alive()
+
+ pid = command.process.pid
+ process = psutil.Process(pid)
+ with open(pid_file, "w", encoding="utf-8") as file:
+ csv_writer = csv.writer(file)
+ csv_writer.writerow(("some_process", "some_program", "some_cwd", os.getpid()))
+ csv_writer.writerow((process.name(), process.exe(), process.cwd(), process.pid))
+ csv_writer.writerow(("some_old_process", "not_running", "from_nowhere", 77777))
+
+ service_controller = get_system_controller(
+ single_instance=True, pid_file_path=pid_file
+ )
+ service_controller.start(["sleep 100"], Path.cwd())
+ # controller should stop this process as it is currently running and
+ # mentioned in pid file
+ assert not command.is_alive()
+
+ service_controller.stop()
+
+
+@pytest.mark.parametrize(
+ "executable", ["test_backend_run_script.sh", "test_backend_run"]
+)
+def test_service_controller_run_shell_script(
+ executable: str, test_resources_path: Path
+) -> None:
+ """Test controller's ability to run shell scripts."""
+ script_path = test_resources_path / "scripts"
+
+ service_controller = get_system_controller()
+
+ service_controller.start([executable], script_path)
+
+ assert service_controller.is_running()
+ # give time for the command to produce output
+ time.sleep(2)
+ service_controller.stop(wait=True)
+ assert not service_controller.is_running()
+ stdout, stderr = service_controller.get_output()
+ assert stdout == "Hello from script\n"
+ assert stderr == "Oops!\n"
+
+
+def test_service_controller_does_nothing_if_not_started(tmpdir: Any) -> None:
+ """Test that nothing happens if the controller is not started."""
+ service_controller = get_system_controller(
+ single_instance=True, pid_file_path=Path(tmpdir) / "test.pid"
+ )
+
+ assert not service_controller.is_running()
+ service_controller.stop()
+ assert not service_controller.is_running()
diff --git a/tests/mlia/test_backend_execution.py b/tests/mlia/test_backend_execution.py
new file mode 100644
index 0000000..9395352
--- /dev/null
+++ b/tests/mlia/test_backend_execution.py
@@ -0,0 +1,518 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+# pylint: disable=no-self-use
+"""Test backend execution module."""
+from contextlib import ExitStack as does_not_raise
+from pathlib import Path
+from typing import Any
+from typing import Dict
+from unittest import mock
+from unittest.mock import MagicMock
+
+import pytest
+from sh import CommandNotFound
+
+from mlia.backend.application import Application
+from mlia.backend.application import get_application
+from mlia.backend.common import DataPaths
+from mlia.backend.common import UserParamConfig
+from mlia.backend.config import ApplicationConfig
+from mlia.backend.config import LocalProtocolConfig
+from mlia.backend.config import SystemConfig
+from mlia.backend.execution import deploy_data
+from mlia.backend.execution import execute_commands_locally
+from mlia.backend.execution import ExecutionContext
+from mlia.backend.execution import get_application_and_system
+from mlia.backend.execution import get_application_by_name_and_system
+from mlia.backend.execution import get_file_lock_path
+from mlia.backend.execution import ParamResolver
+from mlia.backend.execution import Reporter
+from mlia.backend.execution import wait
+from mlia.backend.output_parser import Base64OutputParser
+from mlia.backend.output_parser import OutputParser
+from mlia.backend.output_parser import RegexOutputParser
+from mlia.backend.proc import CommandFailedException
+from mlia.backend.system import get_system
+from mlia.backend.system import load_system
+
+
+def test_context_param_resolver(tmpdir: Any) -> None:
+ """Test parameter resolving."""
+ system_config_location = Path(tmpdir) / "system"
+ system_config_location.mkdir()
+
+ application_config_location = Path(tmpdir) / "application"
+ application_config_location.mkdir()
+
+ ctx = ExecutionContext(
+ app=Application(
+ ApplicationConfig(
+ name="test_application",
+ description="Test application",
+ config_location=application_config_location,
+ build_dir="build-{application.name}-{system.name}",
+ commands={
+ "run": [
+ "run_command1 {user_params:0}",
+ "run_command2 {user_params:1}",
+ ]
+ },
+ variables={"var_1": "value for var_1"},
+ user_params={
+ "run": [
+ UserParamConfig(
+ name="--param1",
+ description="Param 1",
+ default_value="123",
+ alias="param_1",
+ ),
+ UserParamConfig(
+ name="--param2", description="Param 2", default_value="456"
+ ),
+ UserParamConfig(
+ name="--param3", description="Param 3", alias="param_3"
+ ),
+ UserParamConfig(
+ name="--param4=",
+ description="Param 4",
+ default_value="456",
+ alias="param_4",
+ ),
+ UserParamConfig(
+ description="Param 5",
+ default_value="789",
+ alias="param_5",
+ ),
+ ]
+ },
+ )
+ ),
+ app_params=["--param2=789"],
+ system=load_system(
+ SystemConfig(
+ name="test_system",
+ description="Test system",
+ config_location=system_config_location,
+ build_dir="build",
+ data_transfer=LocalProtocolConfig(protocol="local"),
+ commands={
+ "build": ["build_command1 {user_params:0}"],
+ "run": ["run_command {application.commands.run:1}"],
+ },
+ variables={"var_1": "value for var_1"},
+ user_params={
+ "build": [
+ UserParamConfig(
+ name="--param1", description="Param 1", default_value="aaa"
+ ),
+ UserParamConfig(name="--param2", description="Param 2"),
+ ]
+ },
+ )
+ ),
+ system_params=["--param1=bbb"],
+ custom_deploy_data=[],
+ )
+
+ param_resolver = ParamResolver(ctx)
+ expected_values = {
+ "application.name": "test_application",
+ "application.description": "Test application",
+ "application.config_dir": str(application_config_location),
+ "application.build_dir": "{}/build-test_application-test_system".format(
+ application_config_location
+ ),
+ "application.commands.run:0": "run_command1 --param1 123",
+ "application.commands.run.params:0": "123",
+ "application.commands.run.params:param_1": "123",
+ "application.commands.run:1": "run_command2 --param2 789",
+ "application.commands.run.params:1": "789",
+ "application.variables:var_1": "value for var_1",
+ "system.name": "test_system",
+ "system.description": "Test system",
+ "system.config_dir": str(system_config_location),
+ "system.commands.build:0": "build_command1 --param1 bbb",
+ "system.commands.run:0": "run_command run_command2 --param2 789",
+ "system.commands.build.params:0": "bbb",
+ "system.variables:var_1": "value for var_1",
+ }
+
+ for param, value in expected_values.items():
+ assert param_resolver(param) == value
+
+ assert ctx.build_dir() == Path(
+ "{}/build-test_application-test_system".format(application_config_location)
+ )
+
+ expected_errors = {
+ "application.variables:var_2": pytest.raises(
+ Exception, match="Unknown variable var_2"
+ ),
+ "application.commands.clean:0": pytest.raises(
+ Exception, match="Command clean not found"
+ ),
+ "application.commands.run:2": pytest.raises(
+ Exception, match="Invalid index 2 for command run"
+ ),
+ "application.commands.run.params:5": pytest.raises(
+ Exception, match="Invalid parameter index 5 for command run"
+ ),
+ "application.commands.run.params:param_2": pytest.raises(
+ Exception,
+ match="No value for parameter with index or alias param_2 of command run",
+ ),
+ "UNKNOWN": pytest.raises(
+ Exception, match="Unable to resolve parameter UNKNOWN"
+ ),
+ "system.commands.build.params:1": pytest.raises(
+ Exception,
+ match="No value for parameter with index or alias 1 of command build",
+ ),
+ "system.commands.build:A": pytest.raises(
+ Exception, match="Bad command index A"
+ ),
+ "system.variables:var_2": pytest.raises(
+ Exception, match="Unknown variable var_2"
+ ),
+ }
+ for param, error in expected_errors.items():
+ with error:
+ param_resolver(param)
+
+ resolved_params = ctx.app.resolved_parameters("run", [])
+ expected_user_params = {
+ "user_params:0": "--param1 123",
+ "user_params:param_1": "--param1 123",
+ "user_params:2": "--param3",
+ "user_params:param_3": "--param3",
+ "user_params:3": "--param4=456",
+ "user_params:param_4": "--param4=456",
+ "user_params:param_5": "789",
+ }
+ for param, expected_value in expected_user_params.items():
+ assert param_resolver(param, "run", resolved_params) == expected_value
+
+ with pytest.raises(
+ Exception, match="Invalid index 5 for user params of command run"
+ ):
+ param_resolver("user_params:5", "run", resolved_params)
+
+ with pytest.raises(
+ Exception, match="No user parameter for command 'run' with alias 'param_2'."
+ ):
+ param_resolver("user_params:param_2", "run", resolved_params)
+
+ with pytest.raises(Exception, match="Unable to resolve user params"):
+ param_resolver("user_params:0", "", resolved_params)
+
+ bad_ctx = ExecutionContext(
+ app=Application(
+ ApplicationConfig(
+ name="test_application",
+ config_location=application_config_location,
+ build_dir="build-{user_params:0}",
+ )
+ ),
+ app_params=["--param2=789"],
+ system=load_system(
+ SystemConfig(
+ name="test_system",
+ description="Test system",
+ config_location=system_config_location,
+ build_dir="build-{system.commands.run.params:123}",
+ data_transfer=LocalProtocolConfig(protocol="local"),
+ )
+ ),
+ system_params=["--param1=bbb"],
+ custom_deploy_data=[],
+ )
+ param_resolver = ParamResolver(bad_ctx)
+ with pytest.raises(Exception, match="Unable to resolve user params"):
+ bad_ctx.build_dir()
+
+
+# pylint: disable=too-many-arguments
+@pytest.mark.parametrize(
+ "application_name, soft_lock, sys_lock, lock_dir, expected_error, expected_path",
+ (
+ (
+ "test_application",
+ True,
+ True,
+ Path("/tmp"),
+ does_not_raise(),
+ Path("/tmp/middleware_test_application_test_system.lock"),
+ ),
+ (
+ "$$test_application$!:",
+ True,
+ True,
+ Path("/tmp"),
+ does_not_raise(),
+ Path("/tmp/middleware_test_application_test_system.lock"),
+ ),
+ (
+ "test_application",
+ True,
+ True,
+ Path("unknown"),
+ pytest.raises(
+ Exception, match="Invalid directory unknown for lock files provided"
+ ),
+ None,
+ ),
+ (
+ "test_application",
+ False,
+ True,
+ Path("/tmp"),
+ does_not_raise(),
+ Path("/tmp/middleware_test_system.lock"),
+ ),
+ (
+ "test_application",
+ True,
+ False,
+ Path("/tmp"),
+ does_not_raise(),
+ Path("/tmp/middleware_test_application.lock"),
+ ),
+ (
+ "test_application",
+ False,
+ False,
+ Path("/tmp"),
+ pytest.raises(Exception, match="No filename for lock provided"),
+ None,
+ ),
+ ),
+)
+def test_get_file_lock_path(
+ application_name: str,
+ soft_lock: bool,
+ sys_lock: bool,
+ lock_dir: Path,
+ expected_error: Any,
+ expected_path: Path,
+) -> None:
+ """Test get_file_lock_path function."""
+ with expected_error:
+ ctx = ExecutionContext(
+ app=Application(ApplicationConfig(name=application_name, lock=soft_lock)),
+ app_params=[],
+ system=load_system(
+ SystemConfig(
+ name="test_system",
+ lock=sys_lock,
+ data_transfer=LocalProtocolConfig(protocol="local"),
+ )
+ ),
+ system_params=[],
+ custom_deploy_data=[],
+ )
+ path = get_file_lock_path(ctx, lock_dir)
+ assert path == expected_path
+
+
+def test_get_application_by_name_and_system(monkeypatch: Any) -> None:
+ """Test exceptional case for get_application_by_name_and_system."""
+ monkeypatch.setattr(
+ "mlia.backend.execution.get_application",
+ MagicMock(return_value=[MagicMock(), MagicMock()]),
+ )
+
+ with pytest.raises(
+ ValueError,
+ match="Error during getting application test_application for the "
+ "system test_system",
+ ):
+ get_application_by_name_and_system("test_application", "test_system")
+
+
+def test_get_application_and_system(monkeypatch: Any) -> None:
+ """Test exceptional case for get_application_and_system."""
+ monkeypatch.setattr(
+ "mlia.backend.execution.get_system", MagicMock(return_value=None)
+ )
+
+ with pytest.raises(ValueError, match="System test_system is not found"):
+ get_application_and_system("test_application", "test_system")
+
+
+def test_wait_function(monkeypatch: Any) -> None:
+ """Test wait function."""
+ sleep_mock = MagicMock()
+ monkeypatch.setattr("time.sleep", sleep_mock)
+ wait(0.1)
+ sleep_mock.assert_called_once()
+
+
+def test_deployment_execution_context() -> None:
+ """Test property 'is_deploy_needed' of the ExecutionContext."""
+ ctx = ExecutionContext(
+ app=get_application("application_1")[0],
+ app_params=[],
+ system=get_system("System 1"),
+ system_params=[],
+ )
+ assert not ctx.is_deploy_needed
+ deploy_data(ctx) # should be a NOP
+
+ ctx = ExecutionContext(
+ app=get_application("application_1")[0],
+ app_params=[],
+ system=get_system("System 1"),
+ system_params=[],
+ custom_deploy_data=[DataPaths(Path("README.md"), ".")],
+ )
+ assert ctx.is_deploy_needed
+
+ ctx = ExecutionContext(
+ app=get_application("application_1")[0],
+ app_params=[],
+ system=None,
+ system_params=[],
+ )
+ assert not ctx.is_deploy_needed
+ with pytest.raises(AssertionError):
+ deploy_data(ctx)
+
+
+def test_reporter_execution_context(tmp_path: Path) -> None:
+ """Test ExecutionContext creates a reporter when a report file is provided."""
+ # Configure regex parser for the system manually
+ sys = get_system("System 1")
+ assert sys is not None
+ sys.reporting = {
+ "regex": {
+ "simulated_time": {"pattern": "Simulated time.*: (.*)s", "type": "float"}
+ }
+ }
+ report_file_path = tmp_path / "test_report.json"
+
+ ctx = ExecutionContext(
+ app=get_application("application_1")[0],
+ app_params=[],
+ system=sys,
+ system_params=[],
+ report_file=report_file_path,
+ )
+ assert isinstance(ctx.reporter, Reporter)
+ assert len(ctx.reporter.parsers) == 2
+ assert any(isinstance(parser, RegexOutputParser) for parser in ctx.reporter.parsers)
+ assert any(
+ isinstance(parser, Base64OutputParser) for parser in ctx.reporter.parsers
+ )
+
+
+class TestExecuteCommandsLocally:
+ """Test execute_commands_locally() function."""
+
+ @pytest.mark.parametrize(
+ "first_command, exception, expected_output",
+ (
+ (
+ "echo 'hello'",
+ None,
+ "Running: echo 'hello'\nhello\nRunning: echo 'goodbye'\ngoodbye\n",
+ ),
+ (
+ "non-existent-command",
+ CommandNotFound,
+ "Running: non-existent-command\n",
+ ),
+ ("false", CommandFailedException, "Running: false\n"),
+ ),
+ ids=(
+ "runs_multiple_commands",
+ "stops_executing_on_non_existent_command",
+ "stops_executing_when_command_exits_with_error_code",
+ ),
+ )
+ def test_execution(
+ self,
+ first_command: str,
+ exception: Any,
+ expected_output: str,
+ test_resources_path: Path,
+ capsys: Any,
+ ) -> None:
+ """Test expected behaviour of the function."""
+ commands = [first_command, "echo 'goodbye'"]
+ cwd = test_resources_path
+ if exception is None:
+ execute_commands_locally(commands, cwd)
+ else:
+ with pytest.raises(exception):
+ execute_commands_locally(commands, cwd)
+
+ captured = capsys.readouterr()
+ assert captured.out == expected_output
+
+ def test_stops_executing_on_exception(
+ self, monkeypatch: Any, test_resources_path: Path
+ ) -> None:
+ """Ensure commands following an error-exit-code command don't run."""
+ # Mock execute_command() function
+ execute_command_mock = mock.MagicMock()
+ monkeypatch.setattr("mlia.backend.proc.execute_command", execute_command_mock)
+
+ # Mock Command object and assign as return value to execute_command()
+ cmd_mock = mock.MagicMock()
+ execute_command_mock.return_value = cmd_mock
+
+ # Mock the terminate_command (speed up test)
+ terminate_command_mock = mock.MagicMock()
+ monkeypatch.setattr(
+ "mlia.backend.proc.terminate_command", terminate_command_mock
+ )
+
+ # Mock a thrown Exception and assign to Command().exit_code
+ exit_code_mock = mock.PropertyMock(side_effect=Exception("Exception."))
+ type(cmd_mock).exit_code = exit_code_mock
+
+ with pytest.raises(Exception, match="Exception."):
+ execute_commands_locally(
+ ["command_1", "command_2"], cwd=test_resources_path
+ )
+
+ # Assert only "command_1" was executed
+ assert execute_command_mock.call_count == 1
+
+
+def test_reporter(tmpdir: Any) -> None:
+ """Test class 'Reporter'."""
+ ctx = ExecutionContext(
+ app=get_application("application_4")[0],
+ app_params=["--app=TestApp"],
+ system=get_system("System 4"),
+ system_params=[],
+ )
+ assert ctx.system is not None
+
+ class MockParser(OutputParser):
+ """Mock implementation of an output parser."""
+
+ def __init__(self, metrics: Dict[str, Any]) -> None:
+ """Set up the MockParser."""
+ super().__init__(name="test")
+ self.metrics = metrics
+
+ def __call__(self, output: bytearray) -> Dict[str, Any]:
+ """Return mock metrics (ignoring the given output)."""
+ return self.metrics
+
+ metrics = {"Metric": 123, "AnotherMetric": 456}
+ reporter = Reporter(
+ parsers=[MockParser(metrics={key: val}) for key, val in metrics.items()],
+ )
+ reporter.parse(bytearray())
+ report = reporter.report(ctx)
+ assert report["system"]["name"] == ctx.system.name
+ assert report["system"]["params"] == {}
+ assert report["application"]["name"] == ctx.app.name
+ assert report["application"]["params"] == {"--app": "TestApp"}
+ assert report["test"]["metrics"] == metrics
+ report_file = Path(tmpdir) / "report.json"
+ reporter.save(report, report_file)
+ assert report_file.is_file()
diff --git a/tests/mlia/test_backend_fs.py b/tests/mlia/test_backend_fs.py
new file mode 100644
index 0000000..ff9c2ae
--- /dev/null
+++ b/tests/mlia/test_backend_fs.py
@@ -0,0 +1,168 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+# pylint: disable=no-self-use
+"""Module for testing fs.py."""
+from contextlib import ExitStack as does_not_raise
+from pathlib import Path
+from typing import Any
+from typing import Union
+from unittest.mock import MagicMock
+
+import pytest
+
+from mlia.backend.fs import get_backends_path
+from mlia.backend.fs import read_file_as_bytearray
+from mlia.backend.fs import read_file_as_string
+from mlia.backend.fs import recreate_directory
+from mlia.backend.fs import remove_directory
+from mlia.backend.fs import remove_resource
+from mlia.backend.fs import ResourceType
+from mlia.backend.fs import valid_for_filename
+
+
+@pytest.mark.parametrize(
+ "resource_name,expected_path",
+ [
+ ("systems", does_not_raise()),
+ ("applications", does_not_raise()),
+ ("whaaat", pytest.raises(ResourceWarning)),
+ (None, pytest.raises(ResourceWarning)),
+ ],
+)
+def test_get_backends_path(resource_name: ResourceType, expected_path: Any) -> None:
+ """Test get_backends_path() with multiple parameters."""
+ with expected_path:
+ resource_path = get_backends_path(resource_name)
+ assert resource_path.exists()
+
+
+def test_remove_resource_wrong_directory(
+ monkeypatch: Any, test_applications_path: Path
+) -> None:
+ """Test removing resource with wrong directory."""
+ mock_get_resources = MagicMock(return_value=test_applications_path)
+ monkeypatch.setattr("mlia.backend.fs.get_backends_path", mock_get_resources)
+
+ mock_shutil_rmtree = MagicMock()
+ monkeypatch.setattr("mlia.backend.fs.shutil.rmtree", mock_shutil_rmtree)
+
+ with pytest.raises(Exception, match="Resource .* does not exist"):
+ remove_resource("unknown", "applications")
+ mock_shutil_rmtree.assert_not_called()
+
+ with pytest.raises(Exception, match="Wrong resource .*"):
+ remove_resource("readme.txt", "applications")
+ mock_shutil_rmtree.assert_not_called()
+
+
+def test_remove_resource(monkeypatch: Any, test_applications_path: Path) -> None:
+ """Test removing resource data."""
+ mock_get_resources = MagicMock(return_value=test_applications_path)
+ monkeypatch.setattr("mlia.backend.fs.get_backends_path", mock_get_resources)
+
+ mock_shutil_rmtree = MagicMock()
+ monkeypatch.setattr("mlia.backend.fs.shutil.rmtree", mock_shutil_rmtree)
+
+ remove_resource("application1", "applications")
+ mock_shutil_rmtree.assert_called_once()
+
+
+def test_remove_directory(tmpdir: Any) -> None:
+ """Test directory removal."""
+ tmpdir_path = Path(tmpdir)
+ tmpfile = tmpdir_path / "temp.txt"
+
+ for item in [None, tmpfile]:
+ with pytest.raises(Exception, match="No directory path provided"):
+ remove_directory(item)
+
+ newdir = tmpdir_path / "newdir"
+ newdir.mkdir()
+
+ assert newdir.is_dir()
+ remove_directory(newdir)
+ assert not newdir.exists()
+
+
+def test_recreate_directory(tmpdir: Any) -> None:
+ """Test directory recreation."""
+ with pytest.raises(Exception, match="No directory path provided"):
+ recreate_directory(None)
+
+ tmpdir_path = Path(tmpdir)
+ tmpfile = tmpdir_path / "temp.txt"
+ tmpfile.touch()
+ with pytest.raises(Exception, match="Path .* does exist and it is not a directory"):
+ recreate_directory(tmpfile)
+
+ newdir = tmpdir_path / "newdir"
+ newdir.mkdir()
+ newfile = newdir / "newfile"
+ newfile.touch()
+ assert list(newdir.iterdir()) == [newfile]
+ recreate_directory(newdir)
+ assert not list(newdir.iterdir())
+
+ newdir2 = tmpdir_path / "newdir2"
+ assert not newdir2.exists()
+ recreate_directory(newdir2)
+ assert newdir2.is_dir()
+
+
+def write_to_file(
+ write_directory: Any, write_mode: str, write_text: Union[str, bytes]
+) -> Path:
+ """Write some text to a temporary test file."""
+ tmpdir_path = Path(write_directory)
+ tmpfile = tmpdir_path / "file_name.txt"
+ with open(tmpfile, write_mode) as file: # pylint: disable=unspecified-encoding
+ file.write(write_text)
+ return tmpfile
+
+
+class TestReadFileAsString:
+ """Test read_file_as_string() function."""
+
+ def test_returns_text_from_valid_file(self, tmpdir: Any) -> None:
+ """Ensure the string written to a file is read back correctly."""
+ file_path = write_to_file(tmpdir, "w", "hello")
+ assert read_file_as_string(file_path) == "hello"
+
+ def test_output_is_empty_string_when_input_file_non_existent(
+ self, tmpdir: Any
+ ) -> None:
+ """Ensure empty string returned when reading from non-existent file."""
+ file_path = Path(tmpdir / "non-existent.txt")
+ assert read_file_as_string(file_path) == ""
+
+
+class TestReadFileAsByteArray:
+ """Test read_file_as_bytearray() function."""
+
+ def test_returns_bytes_from_valid_file(self, tmpdir: Any) -> None:
+ """Ensure the bytes written to a file are read back correctly."""
+ file_path = write_to_file(tmpdir, "wb", b"hello bytes")
+ assert read_file_as_bytearray(file_path) == b"hello bytes"
+
+ def test_output_is_empty_bytearray_when_input_file_non_existent(
+ self, tmpdir: Any
+ ) -> None:
+ """Ensure empty bytearray returned when reading from non-existent file."""
+ file_path = Path(tmpdir / "non-existent.txt")
+ assert read_file_as_bytearray(file_path) == bytearray()
+
+
+@pytest.mark.parametrize(
+ "value, replacement, expected_result",
+ [
+ ["", "", ""],
+ ["123", "", "123"],
+ ["123", "_", "123"],
+ ["/some_folder/some_script.sh", "", "some_foldersome_script.sh"],
+ ["/some_folder/some_script.sh", "_", "_some_folder_some_script.sh"],
+ ["!;'some_name$%^!", "_", "___some_name____"],
+ ],
+)
+def test_valid_for_filename(value: str, replacement: str, expected_result: str) -> None:
+ """Test function valid_for_filename."""
+ assert valid_for_filename(value, replacement) == expected_result
diff --git a/tests/mlia/test_tools_aiet_wrapper.py b/tests/mlia/test_backend_manager.py
index ab55b71..c81366f 100644
--- a/tests/mlia/test_tools_aiet_wrapper.py
+++ b/tests/mlia/test_backend_manager.py
@@ -1,6 +1,7 @@
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
-"""Tests for module tools/aiet_wrapper."""
+"""Tests for module backend/manager."""
+import os
from contextlib import ExitStack as does_not_raise
from pathlib import Path
from typing import Any
@@ -13,20 +14,23 @@ from unittest.mock import PropertyMock
import pytest
-from mlia.tools.aiet_wrapper import AIETRunner
-from mlia.tools.aiet_wrapper import DeviceInfo
-from mlia.tools.aiet_wrapper import estimate_performance
-from mlia.tools.aiet_wrapper import ExecutionParams
-from mlia.tools.aiet_wrapper import GenericInferenceOutputParser
-from mlia.tools.aiet_wrapper import GenericInferenceRunnerEthosU
-from mlia.tools.aiet_wrapper import get_aiet_runner
-from mlia.tools.aiet_wrapper import get_generic_runner
-from mlia.tools.aiet_wrapper import get_system_name
-from mlia.tools.aiet_wrapper import is_supported
-from mlia.tools.aiet_wrapper import ModelInfo
-from mlia.tools.aiet_wrapper import PerformanceMetrics
-from mlia.tools.aiet_wrapper import supported_backends
-from mlia.utils.proc import RunningCommand
+from mlia.backend.application import get_application
+from mlia.backend.common import DataPaths
+from mlia.backend.execution import ExecutionContext
+from mlia.backend.execution import run_application
+from mlia.backend.manager import BackendRunner
+from mlia.backend.manager import DeviceInfo
+from mlia.backend.manager import estimate_performance
+from mlia.backend.manager import ExecutionParams
+from mlia.backend.manager import GenericInferenceOutputParser
+from mlia.backend.manager import GenericInferenceRunnerEthosU
+from mlia.backend.manager import get_generic_runner
+from mlia.backend.manager import get_system_name
+from mlia.backend.manager import is_supported
+from mlia.backend.manager import ModelInfo
+from mlia.backend.manager import PerformanceMetrics
+from mlia.backend.manager import supported_backends
+from mlia.backend.system import get_system
@pytest.mark.parametrize(
@@ -108,16 +112,16 @@ def test_generic_inference_output_parser(
assert parser.missed_keys() == missed_keys
-class TestAIETRunner:
- """Tests for AIETRunner class."""
+class TestBackendRunner:
+ """Tests for BackendRunner class."""
@staticmethod
- def _setup_aiet(
+ def _setup_backends(
monkeypatch: pytest.MonkeyPatch,
available_systems: Optional[List[str]] = None,
available_apps: Optional[List[str]] = None,
) -> None:
- """Set up AIET metadata."""
+ """Set up backend metadata."""
def mock_system(system: str) -> MagicMock:
"""Mock the System instance."""
@@ -134,13 +138,13 @@ class TestAIETRunner:
system_mocks = [mock_system(name) for name in (available_systems or [])]
monkeypatch.setattr(
- "mlia.tools.aiet_wrapper.get_available_systems",
+ "mlia.backend.manager.get_available_systems",
MagicMock(return_value=system_mocks),
)
apps_mock = [mock_app(name) for name in (available_apps or [])]
monkeypatch.setattr(
- "mlia.tools.aiet_wrapper.get_available_applications",
+ "mlia.backend.manager.get_available_applications",
MagicMock(return_value=apps_mock),
)
@@ -159,13 +163,11 @@ class TestAIETRunner:
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Test method is_system_installed."""
- mock_executor = MagicMock()
- aiet_runner = AIETRunner(mock_executor)
+ backend_runner = BackendRunner()
- self._setup_aiet(monkeypatch, available_systems)
+ self._setup_backends(monkeypatch, available_systems)
- assert aiet_runner.is_system_installed(system) == installed
- mock_executor.assert_not_called()
+ assert backend_runner.is_system_installed(system) == installed
@pytest.mark.parametrize(
"available_systems, systems",
@@ -181,28 +183,21 @@ class TestAIETRunner:
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Test method installed_systems."""
- mock_executor = MagicMock()
- aiet_runner = AIETRunner(mock_executor)
+ backend_runner = BackendRunner()
- self._setup_aiet(monkeypatch, available_systems)
- assert aiet_runner.get_installed_systems() == systems
-
- mock_executor.assert_not_called()
+ self._setup_backends(monkeypatch, available_systems)
+ assert backend_runner.get_installed_systems() == systems
@staticmethod
def test_install_system(monkeypatch: pytest.MonkeyPatch) -> None:
"""Test system installation."""
install_system_mock = MagicMock()
- monkeypatch.setattr(
- "mlia.tools.aiet_wrapper.install_system", install_system_mock
- )
+ monkeypatch.setattr("mlia.backend.manager.install_system", install_system_mock)
- mock_executor = MagicMock()
- aiet_runner = AIETRunner(mock_executor)
- aiet_runner.install_system(Path("test_system_path"))
+ backend_runner = BackendRunner()
+ backend_runner.install_system(Path("test_system_path"))
install_system_mock.assert_called_once_with(Path("test_system_path"))
- mock_executor.assert_not_called()
@pytest.mark.parametrize(
"available_systems, systems, expected_result",
@@ -222,14 +217,11 @@ class TestAIETRunner:
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Test method systems_installed."""
- self._setup_aiet(monkeypatch, available_systems)
+ self._setup_backends(monkeypatch, available_systems)
- mock_executor = MagicMock()
- aiet_runner = AIETRunner(mock_executor)
+ backend_runner = BackendRunner()
- assert aiet_runner.systems_installed(systems) is expected_result
-
- mock_executor.assert_not_called()
+ assert backend_runner.systems_installed(systems) is expected_result
@pytest.mark.parametrize(
"available_apps, applications, expected_result",
@@ -249,12 +241,10 @@ class TestAIETRunner:
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Test method applications_installed."""
- self._setup_aiet(monkeypatch, [], available_apps)
- mock_executor = MagicMock()
- aiet_runner = AIETRunner(mock_executor)
+ self._setup_backends(monkeypatch, [], available_apps)
+ backend_runner = BackendRunner()
- assert aiet_runner.applications_installed(applications) is expected_result
- mock_executor.assert_not_called()
+ assert backend_runner.applications_installed(applications) is expected_result
@pytest.mark.parametrize(
"available_apps, applications",
@@ -273,30 +263,23 @@ class TestAIETRunner:
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Test method get_installed_applications."""
- mock_executor = MagicMock()
- self._setup_aiet(monkeypatch, [], available_apps)
-
- aiet_runner = AIETRunner(mock_executor)
- assert applications == aiet_runner.get_installed_applications()
+ self._setup_backends(monkeypatch, [], available_apps)
- mock_executor.assert_not_called()
+ backend_runner = BackendRunner()
+ assert applications == backend_runner.get_installed_applications()
@staticmethod
def test_install_application(monkeypatch: pytest.MonkeyPatch) -> None:
"""Test application installation."""
mock_install_application = MagicMock()
monkeypatch.setattr(
- "mlia.tools.aiet_wrapper.install_application", mock_install_application
+ "mlia.backend.manager.install_application", mock_install_application
)
- mock_executor = MagicMock()
-
- aiet_runner = AIETRunner(mock_executor)
- aiet_runner.install_application(Path("test_application_path"))
+ backend_runner = BackendRunner()
+ backend_runner.install_application(Path("test_application_path"))
mock_install_application.assert_called_once_with(Path("test_application_path"))
- mock_executor.assert_not_called()
-
@pytest.mark.parametrize(
"available_apps, application, installed",
[
@@ -321,66 +304,113 @@ class TestAIETRunner:
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Test method is_application_installed."""
- self._setup_aiet(monkeypatch, [], available_apps)
-
- mock_executor = MagicMock()
- aiet_runner = AIETRunner(mock_executor)
- assert installed == aiet_runner.is_application_installed(application, "system1")
+ self._setup_backends(monkeypatch, [], available_apps)
- mock_executor.assert_not_called()
+ backend_runner = BackendRunner()
+ assert installed == backend_runner.is_application_installed(
+ application, "system1"
+ )
@staticmethod
@pytest.mark.parametrize(
"execution_params, expected_command",
[
(
- ExecutionParams("application1", "system1", [], [], []),
- ["aiet", "application", "run", "-n", "application1", "-s", "system1"],
+ ExecutionParams("application_4", "System 4", [], [], []),
+ ["application_4", [], "System 4", [], []],
),
(
ExecutionParams(
- "application1",
- "system1",
- ["input_file=123.txt", "size=777"],
- ["param1=456", "param2=789"],
+ "application_6",
+ "System 6",
+ ["param1=value2"],
+ ["sys-param1=value2"],
+ [],
+ ),
+ [
+ "application_6",
+ ["param1=value2"],
+ "System 6",
+ ["sys-param1=value2"],
+ [],
+ ],
+ ),
+ ],
+ )
+ def test_run_application_local(
+ monkeypatch: pytest.MonkeyPatch,
+ execution_params: ExecutionParams,
+ expected_command: List[str],
+ ) -> None:
+ """Test method run_application with local systems."""
+ run_app = MagicMock(wraps=run_application)
+ monkeypatch.setattr("mlia.backend.manager.run_application", run_app)
+
+ backend_runner = BackendRunner()
+ backend_runner.run_application(execution_params)
+
+ run_app.assert_called_once_with(*expected_command)
+
+ @staticmethod
+ @pytest.mark.parametrize(
+ "execution_params, expected_command",
+ [
+ (
+ ExecutionParams(
+ "application_1",
+ "System 1",
+ [],
+ [],
["source1.txt:dest1.txt", "source2.txt:dest2.txt"],
),
[
- "aiet",
- "application",
- "run",
- "-n",
- "application1",
- "-s",
- "system1",
- "-p",
- "input_file=123.txt",
- "-p",
- "size=777",
- "--system-param",
- "param1=456",
- "--system-param",
- "param2=789",
- "--deploy",
- "source1.txt:dest1.txt",
- "--deploy",
- "source2.txt:dest2.txt",
+ "application_1",
+ [],
+ "System 1",
+ [],
+ [
+ DataPaths(Path("source1.txt"), "dest1.txt"),
+ DataPaths(Path("source2.txt"), "dest2.txt"),
+ ],
],
),
],
)
- def test_run_application(
- execution_params: ExecutionParams, expected_command: List[str]
+ def test_run_application_connected(
+ monkeypatch: pytest.MonkeyPatch,
+ execution_params: ExecutionParams,
+ expected_command: List[str],
) -> None:
- """Test method run_application."""
- mock_executor = MagicMock()
- mock_running_command = MagicMock()
- mock_executor.submit.return_value = mock_running_command
+ """Test method run_application with connectable systems (SSH)."""
+ app = get_application(execution_params.application, execution_params.system)[0]
+ sys = get_system(execution_params.system)
+
+ assert sys is not None
+
+ connect_mock = MagicMock(return_value=True, name="connect_mock")
+ deploy_mock = MagicMock(return_value=True, name="deploy_mock")
+ run_mock = MagicMock(
+ return_value=(os.EX_OK, bytearray(), bytearray()), name="run_mock"
+ )
+ sys.establish_connection = connect_mock # type: ignore
+ sys.deploy = deploy_mock # type: ignore
+ sys.run = run_mock # type: ignore
+
+ monkeypatch.setattr(
+ "mlia.backend.execution.get_application_and_system",
+ MagicMock(return_value=(app, sys)),
+ )
- aiet_runner = AIETRunner(mock_executor)
- aiet_runner.run_application(execution_params)
+ run_app_mock = MagicMock(wraps=run_application)
+ monkeypatch.setattr("mlia.backend.manager.run_application", run_app_mock)
- mock_executor.submit.assert_called_once_with(expected_command)
+ backend_runner = BackendRunner()
+ backend_runner.run_application(execution_params)
+
+ run_app_mock.assert_called_once_with(*expected_command)
+
+ connect_mock.assert_called_once()
+ assert deploy_mock.call_count == 2
@pytest.mark.parametrize(
@@ -490,16 +520,16 @@ def test_estimate_performance(
backend: str,
expected_error: Any,
test_tflite_model: Path,
- aiet_runner: MagicMock,
+ backend_runner: MagicMock,
) -> None:
"""Test getting performance estimations."""
system_name, system_installed = system
application_name, application_installed = application
- aiet_runner.is_system_installed.return_value = system_installed
- aiet_runner.is_application_installed.return_value = application_installed
+ backend_runner.is_system_installed.return_value = system_installed
+ backend_runner.is_application_installed.return_value = application_installed
- mock_process = create_mock_process(
+ mock_context = create_mock_context(
[
"NPU AXI0_RD_DATA_BEAT_RECEIVED beats: 1",
"NPU AXI0_WR_DATA_BEAT_WRITTEN beats: 2",
@@ -507,12 +537,10 @@ def test_estimate_performance(
"NPU ACTIVE cycles: 4",
"NPU IDLE cycles: 5",
"NPU TOTAL cycles: 6",
- ],
- [],
+ ]
)
- mock_generic_inference_run = RunningCommand(mock_process)
- aiet_runner.run_application.return_value = mock_generic_inference_run
+ backend_runner.run_application.return_value = mock_context
with expected_error:
perf_metrics = estimate_performance(
@@ -529,19 +557,19 @@ def test_estimate_performance(
npu_total_cycles=6,
)
- assert aiet_runner.is_system_installed.called_once_with(system_name)
- assert aiet_runner.is_application_installed.called_once_with(
+ assert backend_runner.is_system_installed.called_once_with(system_name)
+ assert backend_runner.is_application_installed.called_once_with(
application_name, system_name
)
@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
def test_estimate_performance_insufficient_data(
- aiet_runner: MagicMock, test_tflite_model: Path, backend: str
+ backend_runner: MagicMock, test_tflite_model: Path, backend: str
) -> None:
"""Test that performance could not be estimated when not all data presented."""
- aiet_runner.is_system_installed.return_value = True
- aiet_runner.is_application_installed.return_value = True
+ backend_runner.is_system_installed.return_value = True
+ backend_runner.is_application_installed.return_value = True
no_total_cycles_output = [
"NPU AXI0_RD_DATA_BEAT_RECEIVED beats: 1",
@@ -550,13 +578,9 @@ def test_estimate_performance_insufficient_data(
"NPU ACTIVE cycles: 4",
"NPU IDLE cycles: 5",
]
- mock_process = create_mock_process(
- no_total_cycles_output,
- [],
- )
+ mock_context = create_mock_context(no_total_cycles_output)
- mock_generic_inference_run = RunningCommand(mock_process)
- aiet_runner.run_application.return_value = mock_generic_inference_run
+ backend_runner.run_application.return_value = mock_context
with pytest.raises(
Exception, match="Unable to get performance metrics, insufficient data"
@@ -567,16 +591,14 @@ def test_estimate_performance_insufficient_data(
@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
def test_estimate_performance_invalid_output(
- test_tflite_model: Path, aiet_runner: MagicMock, backend: str
+ test_tflite_model: Path, backend_runner: MagicMock, backend: str
) -> None:
"""Test estimation could not be done if inference produces unexpected output."""
- aiet_runner.is_system_installed.return_value = True
- aiet_runner.is_application_installed.return_value = True
+ backend_runner.is_system_installed.return_value = True
+ backend_runner.is_application_installed.return_value = True
- mock_process = create_mock_process(
- ["Something", "is", "wrong"], ["What a nice error!"]
- )
- aiet_runner.run_application.return_value = RunningCommand(mock_process)
+ mock_context = create_mock_context(["Something", "is", "wrong"])
+ backend_runner.run_application.return_value = mock_context
with pytest.raises(Exception, match="Unable to get performance metrics"):
estimate_performance(
@@ -586,12 +608,6 @@ def test_estimate_performance_invalid_output(
)
-def test_get_aiet_runner() -> None:
- """Test getting aiet runner."""
- aiet_runner = get_aiet_runner()
- assert isinstance(aiet_runner, AIETRunner)
-
-
def create_mock_process(stdout: List[str], stderr: List[str]) -> MagicMock:
"""Mock underlying process."""
mock_process = MagicMock()
@@ -601,6 +617,18 @@ def create_mock_process(stdout: List[str], stderr: List[str]) -> MagicMock:
return mock_process
+def create_mock_context(stdout: List[str]) -> ExecutionContext:
+ """Mock ExecutionContext."""
+ ctx = ExecutionContext(
+ app=get_application("application_1")[0],
+ app_params=[],
+ system=get_system("System 1"),
+ system_params=[],
+ )
+ ctx.stdout = bytearray("\n".join(stdout).encode("utf-8"))
+ return ctx
+
+
@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
def test_get_generic_runner(backend: str) -> None:
"""Test function get_generic_runner()."""
@@ -621,8 +649,8 @@ def test_get_generic_runner(backend: str) -> None:
("Corstone-310", "ethos-u55"),
),
)
-def test_aiet_backend_support(backend: str, device_type: str) -> None:
- """Test AIET backend & device support."""
+def test_backend_support(backend: str, device_type: str) -> None:
+ """Test backend & device support."""
assert is_supported(backend)
assert is_supported(backend, device_type)
@@ -714,10 +742,10 @@ class TestGenericInferenceRunnerEthosU:
@staticmethod
@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
def test_inference_should_fail_if_system_not_installed(
- aiet_runner: MagicMock, test_tflite_model: Path, backend: str
+ backend_runner: MagicMock, test_tflite_model: Path, backend: str
) -> None:
"""Test that inference should fail if system is not installed."""
- aiet_runner.is_system_installed.return_value = False
+ backend_runner.is_system_installed.return_value = False
generic_runner = get_generic_runner(
DeviceInfo("ethos-u55", 256, memory_mode="Shared_Sram"), backend
@@ -731,11 +759,11 @@ class TestGenericInferenceRunnerEthosU:
@staticmethod
@pytest.mark.parametrize("backend", ("Corstone-300", "Corstone-310"))
def test_inference_should_fail_is_apps_not_installed(
- aiet_runner: MagicMock, test_tflite_model: Path, backend: str
+ backend_runner: MagicMock, test_tflite_model: Path, backend: str
) -> None:
"""Test that inference should fail if apps are not installed."""
- aiet_runner.is_system_installed.return_value = True
- aiet_runner.is_application_installed.return_value = False
+ backend_runner.is_system_installed.return_value = True
+ backend_runner.is_application_installed.return_value = False
generic_runner = get_generic_runner(
DeviceInfo("ethos-u55", 256, memory_mode="Shared_Sram"), backend
@@ -749,12 +777,12 @@ class TestGenericInferenceRunnerEthosU:
generic_runner.run(ModelInfo(test_tflite_model), [])
-@pytest.fixture(name="aiet_runner")
-def fixture_aiet_runner(monkeypatch: pytest.MonkeyPatch) -> MagicMock:
- """Mock AIET runner."""
- aiet_runner_mock = MagicMock(spec=AIETRunner)
+@pytest.fixture(name="backend_runner")
+def fixture_backend_runner(monkeypatch: pytest.MonkeyPatch) -> MagicMock:
+ """Mock backend runner."""
+ backend_runner_mock = MagicMock(spec=BackendRunner)
monkeypatch.setattr(
- "mlia.tools.aiet_wrapper.get_aiet_runner",
- MagicMock(return_value=aiet_runner_mock),
+ "mlia.backend.manager.get_backend_runner",
+ MagicMock(return_value=backend_runner_mock),
)
- return aiet_runner_mock
+ return backend_runner_mock
diff --git a/tests/mlia/test_backend_output_parser.py b/tests/mlia/test_backend_output_parser.py
new file mode 100644
index 0000000..d86aac8
--- /dev/null
+++ b/tests/mlia/test_backend_output_parser.py
@@ -0,0 +1,152 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for the output parsing."""
+import base64
+import json
+from typing import Any
+from typing import Dict
+
+import pytest
+
+from mlia.backend.output_parser import Base64OutputParser
+from mlia.backend.output_parser import OutputParser
+from mlia.backend.output_parser import RegexOutputParser
+
+
+OUTPUT_MATCH_ALL = bytearray(
+ """
+String1: My awesome string!
+String2: STRINGS_ARE_GREAT!!!
+Int: 12
+Float: 3.14
+""",
+ encoding="utf-8",
+)
+
+OUTPUT_NO_MATCH = bytearray(
+ """
+This contains no matches...
+Test1234567890!"£$%^&*()_+@~{}[]/.,<>?|
+""",
+ encoding="utf-8",
+)
+
+OUTPUT_PARTIAL_MATCH = bytearray(
+ "String1: My awesome string!",
+ encoding="utf-8",
+)
+
+REGEX_CONFIG = {
+ "FirstString": {"pattern": r"String1.*: (.*)", "type": "str"},
+ "SecondString": {"pattern": r"String2.*: (.*)!!!", "type": "str"},
+ "IntegerValue": {"pattern": r"Int.*: (.*)", "type": "int"},
+ "FloatValue": {"pattern": r"Float.*: (.*)", "type": "float"},
+}
+
+EMPTY_REGEX_CONFIG: Dict[str, Dict[str, Any]] = {}
+
+EXPECTED_METRICS_ALL = {
+ "FirstString": "My awesome string!",
+ "SecondString": "STRINGS_ARE_GREAT",
+ "IntegerValue": 12,
+ "FloatValue": 3.14,
+}
+
+EXPECTED_METRICS_PARTIAL = {
+ "FirstString": "My awesome string!",
+}
+
+
+class TestRegexOutputParser:
+ """Collect tests for the RegexOutputParser."""
+
+ @staticmethod
+ @pytest.mark.parametrize(
+ ["output", "config", "expected_metrics"],
+ [
+ (OUTPUT_MATCH_ALL, REGEX_CONFIG, EXPECTED_METRICS_ALL),
+ (OUTPUT_MATCH_ALL + OUTPUT_NO_MATCH, REGEX_CONFIG, EXPECTED_METRICS_ALL),
+ (OUTPUT_MATCH_ALL + OUTPUT_NO_MATCH, REGEX_CONFIG, EXPECTED_METRICS_ALL),
+ (
+ OUTPUT_MATCH_ALL + OUTPUT_PARTIAL_MATCH,
+ REGEX_CONFIG,
+ EXPECTED_METRICS_ALL,
+ ),
+ (OUTPUT_NO_MATCH, REGEX_CONFIG, {}),
+ (OUTPUT_MATCH_ALL, EMPTY_REGEX_CONFIG, {}),
+ (bytearray(), EMPTY_REGEX_CONFIG, {}),
+ (bytearray(), REGEX_CONFIG, {}),
+ ],
+ )
+ def test_parsing(output: bytearray, config: Dict, expected_metrics: Dict) -> None:
+ """
+ Make sure the RegexOutputParser yields valid results.
+
+ I.e. return an empty dict if either the input or the config is empty and
+ return the parsed metrics otherwise.
+ """
+ parser = RegexOutputParser(name="Test", regex_config=config)
+ assert parser.name == "Test"
+ assert isinstance(parser, OutputParser)
+ res = parser(output)
+ assert res == expected_metrics
+
+ @staticmethod
+ def test_unsupported_type() -> None:
+ """An unsupported type in the regex_config must raise an exception."""
+ config = {"BrokenMetric": {"pattern": "(.*)", "type": "UNSUPPORTED_TYPE"}}
+ with pytest.raises(TypeError):
+ RegexOutputParser(name="Test", regex_config=config)
+
+ @staticmethod
+ @pytest.mark.parametrize(
+ "config",
+ (
+ {"TooManyGroups": {"pattern": r"(\w)(\d)", "type": "str"}},
+ {"NoGroups": {"pattern": r"\W", "type": "str"}},
+ ),
+ )
+ def test_invalid_pattern(config: Dict) -> None:
+ """Exactly one capturing parenthesis is allowed in the regex pattern."""
+ with pytest.raises(ValueError):
+ RegexOutputParser(name="Test", regex_config=config)
+
+
+@pytest.mark.parametrize(
+ "expected_metrics",
+ [
+ EXPECTED_METRICS_ALL,
+ EXPECTED_METRICS_PARTIAL,
+ ],
+)
+def test_base64_output_parser(expected_metrics: Dict) -> None:
+ """
+ Make sure the Base64OutputParser yields valid results.
+
+ I.e. return an empty dict if either the input or the config is empty and
+ return the parsed metrics otherwise.
+ """
+ parser = Base64OutputParser(name="Test")
+ assert parser.name == "Test"
+ assert isinstance(parser, OutputParser)
+
+ def create_base64_output(expected_metrics: Dict) -> bytearray:
+ json_str = json.dumps(expected_metrics, indent=4)
+ json_b64 = base64.b64encode(json_str.encode("utf-8"))
+ return (
+ OUTPUT_MATCH_ALL # Should not be matched by the Base64OutputParser
+ + f"<{Base64OutputParser.TAG_NAME}>".encode("utf-8")
+ + bytearray(json_b64)
+ + f"</{Base64OutputParser.TAG_NAME}>".encode("utf-8")
+ + OUTPUT_NO_MATCH # Just to add some difficulty...
+ )
+
+ output = create_base64_output(expected_metrics)
+ res = parser(output)
+ assert len(res) == 1
+ assert isinstance(res, dict)
+ for val in res.values():
+ assert val == expected_metrics
+
+ output = parser.filter_out_parsed_content(output)
+ assert output == (OUTPUT_MATCH_ALL + OUTPUT_NO_MATCH)
diff --git a/tests/mlia/test_backend_proc.py b/tests/mlia/test_backend_proc.py
new file mode 100644
index 0000000..9ca4788
--- /dev/null
+++ b/tests/mlia/test_backend_proc.py
@@ -0,0 +1,272 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+# pylint: disable=attribute-defined-outside-init,no-self-use,not-callable
+"""Pytests for testing mlia/backend/proc.py."""
+from pathlib import Path
+from typing import Any
+from unittest import mock
+
+import psutil
+import pytest
+from sh import ErrorReturnCode
+
+from mlia.backend.proc import Command
+from mlia.backend.proc import CommandFailedException
+from mlia.backend.proc import CommandNotFound
+from mlia.backend.proc import parse_command
+from mlia.backend.proc import print_command_stdout
+from mlia.backend.proc import run_and_wait
+from mlia.backend.proc import save_process_info
+from mlia.backend.proc import ShellCommand
+from mlia.backend.proc import terminate_command
+from mlia.backend.proc import terminate_external_process
+
+
+class TestShellCommand:
+ """Sample class for collecting tests."""
+
+ def test_shellcommand_default_value(self) -> None:
+ """Test the instantiation of the class ShellCommand with no parameter."""
+ shell_command = ShellCommand()
+ assert shell_command.base_log_path == "/tmp"
+
+ @pytest.mark.parametrize(
+ "base_log_path,expected", [("/test", "/test"), ("/asd", "/asd")]
+ )
+ def test_shellcommand_with_param(self, base_log_path: str, expected: str) -> None:
+ """Test init ShellCommand with different parameters."""
+ shell_command = ShellCommand(base_log_path)
+ assert shell_command.base_log_path == expected
+
+ def test_run_ls(self, monkeypatch: Any) -> None:
+ """Test a simple ls command."""
+ mock_command = mock.MagicMock()
+ monkeypatch.setattr(Command, "bake", mock_command)
+
+ mock_get_stdout_stderr_paths = mock.MagicMock()
+ mock_get_stdout_stderr_paths.return_value = ("/tmp/std.out", "/tmp/std.err")
+ monkeypatch.setattr(
+ ShellCommand, "get_stdout_stderr_paths", mock_get_stdout_stderr_paths
+ )
+
+ shell_command = ShellCommand()
+ shell_command.run("ls", "-l")
+ assert mock_command.mock_calls[0] == mock.call(("-l",))
+ assert mock_command.mock_calls[1] == mock.call()(
+ _bg=True, _err="/tmp/std.err", _out="/tmp/std.out", _tee=True, _bg_exc=False
+ )
+
+ def test_run_command_not_found(self) -> None:
+ """Test whe the command doesn't exist."""
+ shell_command = ShellCommand()
+ with pytest.raises(CommandNotFound):
+ shell_command.run("lsl", "-l")
+
+ def test_get_stdout_stderr_paths_valid_path(self) -> None:
+ """Test the method to get files to store stdout and stderr."""
+ valid_path = "/tmp"
+ shell_command = ShellCommand(valid_path)
+ out, err = shell_command.get_stdout_stderr_paths(valid_path, "cmd")
+ assert out.exists() and out.is_file()
+ assert err.exists() and err.is_file()
+ assert "cmd" in out.name
+ assert "cmd" in err.name
+
+ def test_get_stdout_stderr_paths_not_invalid_path(self) -> None:
+ """Test the method to get output files with an invalid path."""
+ invalid_path = "/invalid/foo/bar"
+ shell_command = ShellCommand(invalid_path)
+ with pytest.raises(FileNotFoundError):
+ shell_command.get_stdout_stderr_paths(invalid_path, "cmd")
+
+
+@mock.patch("builtins.print")
+def test_print_command_stdout_alive(mock_print: Any) -> None:
+ """Test the print command stdout with an alive (running) process."""
+ mock_command = mock.MagicMock()
+ mock_command.is_alive.return_value = True
+ mock_command.next.side_effect = ["test1", "test2", StopIteration]
+
+ print_command_stdout(mock_command)
+
+ mock_command.assert_has_calls(
+ [mock.call.is_alive(), mock.call.next(), mock.call.next()]
+ )
+ mock_print.assert_has_calls(
+ [mock.call("test1", end=""), mock.call("test2", end="")]
+ )
+
+
+@mock.patch("builtins.print")
+def test_print_command_stdout_not_alive(mock_print: Any) -> None:
+ """Test the print command stdout with a not alive (exited) process."""
+ mock_command = mock.MagicMock()
+ mock_command.is_alive.return_value = False
+ mock_command.stdout = "test"
+
+ print_command_stdout(mock_command)
+ mock_command.assert_has_calls([mock.call.is_alive()])
+ mock_print.assert_called_once_with("test")
+
+
+def test_terminate_external_process_no_process(capsys: Any) -> None:
+ """Test that non existed process could be terminated."""
+ mock_command = mock.MagicMock()
+ mock_command.terminate.side_effect = psutil.Error("Error!")
+
+ terminate_external_process(mock_command)
+ captured = capsys.readouterr()
+ assert captured.out == "Unable to terminate process\n"
+
+
+def test_terminate_external_process_case1() -> None:
+ """Test when process terminated immediately."""
+ mock_command = mock.MagicMock()
+ mock_command.is_running.return_value = False
+
+ terminate_external_process(mock_command)
+ mock_command.terminate.assert_called_once()
+ mock_command.is_running.assert_called_once()
+
+
+def test_terminate_external_process_case2() -> None:
+ """Test when process termination takes time."""
+ mock_command = mock.MagicMock()
+ mock_command.is_running.side_effect = [True, True, False]
+
+ terminate_external_process(mock_command)
+ mock_command.terminate.assert_called_once()
+ assert mock_command.is_running.call_count == 3
+
+
+def test_terminate_external_process_case3() -> None:
+ """Test when process termination takes more time."""
+ mock_command = mock.MagicMock()
+ mock_command.is_running.side_effect = [True, True, True]
+
+ terminate_external_process(
+ mock_command, number_of_attempts=2, wait_period=0.1, wait_for_termination=0.1
+ )
+ assert mock_command.is_running.call_count == 3
+ assert mock_command.terminate.call_count == 2
+
+
+def test_terminate_external_process_case4() -> None:
+ """Test when process termination takes more time."""
+ mock_command = mock.MagicMock()
+ mock_command.is_running.side_effect = [True, True, False]
+
+ terminate_external_process(
+ mock_command, number_of_attempts=2, wait_period=0.1, wait_for_termination=0.1
+ )
+ mock_command.terminate.assert_called_once()
+ assert mock_command.is_running.call_count == 3
+ assert mock_command.terminate.call_count == 1
+
+
+def test_terminate_command_no_process() -> None:
+ """Test command termination when process does not exist."""
+ mock_command = mock.MagicMock()
+ mock_command.process.signal_group.side_effect = ProcessLookupError()
+
+ terminate_command(mock_command)
+ mock_command.process.signal_group.assert_called_once()
+ mock_command.is_alive.assert_not_called()
+
+
+def test_terminate_command() -> None:
+ """Test command termination."""
+ mock_command = mock.MagicMock()
+ mock_command.is_alive.return_value = False
+
+ terminate_command(mock_command)
+ mock_command.process.signal_group.assert_called_once()
+
+
+def test_terminate_command_case1() -> None:
+ """Test command termination when it takes time.."""
+ mock_command = mock.MagicMock()
+ mock_command.is_alive.side_effect = [True, True, False]
+
+ terminate_command(mock_command, wait_period=0.1)
+ mock_command.process.signal_group.assert_called_once()
+ assert mock_command.is_alive.call_count == 3
+
+
+def test_terminate_command_case2() -> None:
+ """Test command termination when it takes much time.."""
+ mock_command = mock.MagicMock()
+ mock_command.is_alive.side_effect = [True, True, True]
+
+ terminate_command(mock_command, number_of_attempts=3, wait_period=0.1)
+ assert mock_command.is_alive.call_count == 3
+ assert mock_command.process.signal_group.call_count == 2
+
+
+class TestRunAndWait:
+ """Test run_and_wait function."""
+
+ @pytest.fixture(autouse=True)
+ def setup_method(self, monkeypatch: Any) -> None:
+ """Init test method."""
+ self.execute_command_mock = mock.MagicMock()
+ monkeypatch.setattr(
+ "mlia.backend.proc.execute_command", self.execute_command_mock
+ )
+
+ self.terminate_command_mock = mock.MagicMock()
+ monkeypatch.setattr(
+ "mlia.backend.proc.terminate_command", self.terminate_command_mock
+ )
+
+ def test_if_execute_command_raises_exception(self) -> None:
+ """Test if execute_command fails."""
+ self.execute_command_mock.side_effect = Exception("Error!")
+ with pytest.raises(Exception, match="Error!"):
+ run_and_wait("command", Path.cwd())
+
+ def test_if_command_finishes_with_error(self) -> None:
+ """Test if command finishes with error."""
+ cmd_mock = mock.MagicMock()
+ self.execute_command_mock.return_value = cmd_mock
+ exit_code_mock = mock.PropertyMock(
+ side_effect=ErrorReturnCode("cmd", bytearray(), bytearray())
+ )
+ type(cmd_mock).exit_code = exit_code_mock
+
+ with pytest.raises(CommandFailedException):
+ run_and_wait("command", Path.cwd())
+
+ @pytest.mark.parametrize("terminate_on_error, call_count", ((False, 0), (True, 1)))
+ def test_if_command_finishes_with_exception(
+ self, terminate_on_error: bool, call_count: int
+ ) -> None:
+ """Test if command finishes with error."""
+ cmd_mock = mock.MagicMock()
+ self.execute_command_mock.return_value = cmd_mock
+ exit_code_mock = mock.PropertyMock(side_effect=Exception("Error!"))
+ type(cmd_mock).exit_code = exit_code_mock
+
+ with pytest.raises(Exception, match="Error!"):
+ run_and_wait("command", Path.cwd(), terminate_on_error=terminate_on_error)
+
+ assert self.terminate_command_mock.call_count == call_count
+
+
+def test_save_process_info_no_process(monkeypatch: Any, tmpdir: Any) -> None:
+ """Test save_process_info function."""
+ mock_process = mock.MagicMock()
+ monkeypatch.setattr("psutil.Process", mock.MagicMock(return_value=mock_process))
+ mock_process.children.side_effect = psutil.NoSuchProcess(555)
+
+ pid_file_path = Path(tmpdir) / "test.pid"
+ save_process_info(555, pid_file_path)
+ assert not pid_file_path.exists()
+
+
+def test_parse_command() -> None:
+ """Test parse_command function."""
+ assert parse_command("1.sh") == ["bash", "1.sh"]
+ assert parse_command("1.sh", shell="sh") == ["sh", "1.sh"]
+ assert parse_command("command") == ["command"]
+ assert parse_command("command 123 --param=1") == ["command", "123", "--param=1"]
diff --git a/tests/mlia/test_backend_protocol.py b/tests/mlia/test_backend_protocol.py
new file mode 100644
index 0000000..35e9986
--- /dev/null
+++ b/tests/mlia/test_backend_protocol.py
@@ -0,0 +1,231 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+# pylint: disable=no-self-use,attribute-defined-outside-init,protected-access
+"""Tests for the protocol backend module."""
+from contextlib import ExitStack as does_not_raise
+from pathlib import Path
+from typing import Any
+from unittest.mock import MagicMock
+
+import paramiko
+import pytest
+
+from mlia.backend.common import ConfigurationException
+from mlia.backend.config import LocalProtocolConfig
+from mlia.backend.protocol import CustomSFTPClient
+from mlia.backend.protocol import LocalProtocol
+from mlia.backend.protocol import ProtocolFactory
+from mlia.backend.protocol import SSHProtocol
+
+
+class TestProtocolFactory:
+ """Test ProtocolFactory class."""
+
+ @pytest.mark.parametrize(
+ "config, expected_class, exception",
+ [
+ (
+ {
+ "protocol": "ssh",
+ "username": "user",
+ "password": "pass",
+ "hostname": "hostname",
+ "port": "22",
+ },
+ SSHProtocol,
+ does_not_raise(),
+ ),
+ ({"protocol": "local"}, LocalProtocol, does_not_raise()),
+ (
+ {"protocol": "something"},
+ None,
+ pytest.raises(Exception, match="Protocol not supported"),
+ ),
+ (None, None, pytest.raises(Exception, match="No protocol config provided")),
+ ],
+ )
+ def test_get_protocol(
+ self, config: Any, expected_class: type, exception: Any
+ ) -> None:
+ """Test get_protocol method."""
+ factory = ProtocolFactory()
+ with exception:
+ protocol = factory.get_protocol(config)
+ assert isinstance(protocol, expected_class)
+
+
+class TestLocalProtocol:
+ """Test local protocol."""
+
+ def test_local_protocol_run_command(self) -> None:
+ """Test local protocol run command."""
+ config = LocalProtocolConfig(protocol="local")
+ protocol = LocalProtocol(config, cwd=Path("/tmp"))
+ ret, stdout, stderr = protocol.run("pwd")
+ assert ret == 0
+ assert stdout.decode("utf-8").strip() == "/tmp"
+ assert stderr.decode("utf-8") == ""
+
+ def test_local_protocol_run_wrong_cwd(self) -> None:
+ """Execution should fail if wrong working directory provided."""
+ config = LocalProtocolConfig(protocol="local")
+ protocol = LocalProtocol(config, cwd=Path("unknown_directory"))
+ with pytest.raises(
+ ConfigurationException, match="Wrong working directory unknown_directory"
+ ):
+ protocol.run("pwd")
+
+
+class TestSSHProtocol:
+ """Test SSH protocol."""
+
+ @pytest.fixture(autouse=True)
+ def setup_method(self, monkeypatch: Any) -> None:
+ """Set up protocol mocks."""
+ self.mock_ssh_client = MagicMock(spec=paramiko.client.SSHClient)
+
+ self.mock_ssh_channel = (
+ self.mock_ssh_client.get_transport.return_value.open_session.return_value
+ )
+ self.mock_ssh_channel.mock_add_spec(spec=paramiko.channel.Channel)
+ self.mock_ssh_channel.exit_status_ready.side_effect = [False, True]
+ self.mock_ssh_channel.recv_exit_status.return_value = True
+ self.mock_ssh_channel.recv_ready.side_effect = [False, True]
+ self.mock_ssh_channel.recv_stderr_ready.side_effect = [False, True]
+
+ monkeypatch.setattr(
+ "mlia.backend.protocol.paramiko.client.SSHClient",
+ MagicMock(return_value=self.mock_ssh_client),
+ )
+
+ self.mock_sftp_client = MagicMock(spec=CustomSFTPClient)
+ monkeypatch.setattr(
+ "mlia.backend.protocol.CustomSFTPClient.from_transport",
+ MagicMock(return_value=self.mock_sftp_client),
+ )
+
+ ssh_config = {
+ "protocol": "ssh",
+ "username": "user",
+ "password": "pass",
+ "hostname": "hostname",
+ "port": "22",
+ }
+ self.protocol = SSHProtocol(ssh_config)
+
+ def test_unable_create_ssh_client(self, monkeypatch: Any) -> None:
+ """Test that command should fail if unable to create ssh client instance."""
+ monkeypatch.setattr(
+ "mlia.backend.protocol.paramiko.client.SSHClient",
+ MagicMock(side_effect=OSError("Error!")),
+ )
+
+ with pytest.raises(Exception, match="Couldn't connect to 'hostname:22'"):
+ self.protocol.run("command_example", retry=False)
+
+ def test_ssh_protocol_run_command(self) -> None:
+ """Test that command run via ssh successfully."""
+ self.protocol.run("command_example")
+ self.mock_ssh_channel.exec_command.assert_called_once()
+
+ def test_ssh_protocol_run_command_connect_failed(self) -> None:
+ """Test that if connection is not possible then correct exception is raised."""
+ self.mock_ssh_client.connect.side_effect = OSError("Unable to connect")
+ self.mock_ssh_client.close.side_effect = Exception("Error!")
+
+ with pytest.raises(Exception, match="Couldn't connect to 'hostname:22'"):
+ self.protocol.run("command_example", retry=False)
+
+ def test_ssh_protocol_run_command_bad_transport(self) -> None:
+ """Test that command should fail if unable to get transport."""
+ self.mock_ssh_client.get_transport.return_value = None
+
+ with pytest.raises(Exception, match="Unable to get transport"):
+ self.protocol.run("command_example", retry=False)
+
+ def test_ssh_protocol_deploy_command_file(
+ self, test_applications_path: Path
+ ) -> None:
+ """Test that files could be deployed over ssh."""
+ file_for_deploy = test_applications_path / "readme.txt"
+ dest = "/tmp/dest"
+
+ self.protocol.deploy(file_for_deploy, dest)
+ self.mock_sftp_client.put.assert_called_once_with(str(file_for_deploy), dest)
+
+ def test_ssh_protocol_deploy_command_unknown_file(self) -> None:
+ """Test that deploy will fail if file does not exist."""
+ with pytest.raises(Exception, match="Deploy error: file type not supported"):
+ self.protocol.deploy(Path("unknown_file"), "/tmp/dest")
+
+ def test_ssh_protocol_deploy_command_bad_transport(self) -> None:
+ """Test that deploy should fail if unable to get transport."""
+ self.mock_ssh_client.get_transport.return_value = None
+
+ with pytest.raises(Exception, match="Unable to get transport"):
+ self.protocol.deploy(Path("some_file"), "/tmp/dest")
+
+ def test_ssh_protocol_deploy_command_directory(
+ self, test_resources_path: Path
+ ) -> None:
+ """Test that directory could be deployed over ssh."""
+ directory_for_deploy = test_resources_path / "scripts"
+ dest = "/tmp/dest"
+
+ self.protocol.deploy(directory_for_deploy, dest)
+ self.mock_sftp_client.put_dir.assert_called_once_with(
+ directory_for_deploy, dest
+ )
+
+ @pytest.mark.parametrize("establish_connection", (True, False))
+ def test_ssh_protocol_close(self, establish_connection: bool) -> None:
+ """Test protocol close operation."""
+ if establish_connection:
+ self.protocol.establish_connection()
+ self.protocol.close()
+
+ call_count = 1 if establish_connection else 0
+ assert self.mock_ssh_channel.exec_command.call_count == call_count
+
+ def test_connection_details(self) -> None:
+ """Test getting connection details."""
+ assert self.protocol.connection_details() == ("hostname", 22)
+
+
+class TestCustomSFTPClient:
+ """Test CustomSFTPClient class."""
+
+ @pytest.fixture(autouse=True)
+ def setup_method(self, monkeypatch: Any) -> None:
+ """Set up mocks for CustomSFTPClient instance."""
+ self.mock_mkdir = MagicMock()
+ self.mock_put = MagicMock()
+ monkeypatch.setattr(
+ "mlia.backend.protocol.paramiko.SFTPClient.__init__",
+ MagicMock(return_value=None),
+ )
+ monkeypatch.setattr(
+ "mlia.backend.protocol.paramiko.SFTPClient.mkdir", self.mock_mkdir
+ )
+ monkeypatch.setattr(
+ "mlia.backend.protocol.paramiko.SFTPClient.put", self.mock_put
+ )
+
+ self.sftp_client = CustomSFTPClient(MagicMock())
+
+ def test_put_dir(self, test_systems_path: Path) -> None:
+ """Test deploying directory to remote host."""
+ directory_for_deploy = test_systems_path / "system1"
+
+ self.sftp_client.put_dir(directory_for_deploy, "/tmp/dest")
+ assert self.mock_put.call_count == 3
+ assert self.mock_mkdir.call_count == 3
+
+ def test_mkdir(self) -> None:
+ """Test creating directory on remote host."""
+ self.mock_mkdir.side_effect = IOError("Cannot create directory")
+
+ self.sftp_client._mkdir("new_directory", ignore_existing=True)
+
+ with pytest.raises(IOError, match="Cannot create directory"):
+ self.sftp_client._mkdir("new_directory", ignore_existing=False)
diff --git a/tests/mlia/test_backend_source.py b/tests/mlia/test_backend_source.py
new file mode 100644
index 0000000..84a6a77
--- /dev/null
+++ b/tests/mlia/test_backend_source.py
@@ -0,0 +1,203 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+# pylint: disable=no-self-use
+"""Tests for the source backend module."""
+from collections import Counter
+from contextlib import ExitStack as does_not_raise
+from pathlib import Path
+from typing import Any
+from unittest.mock import MagicMock
+from unittest.mock import patch
+
+import pytest
+
+from mlia.backend.common import ConfigurationException
+from mlia.backend.source import create_destination_and_install
+from mlia.backend.source import DirectorySource
+from mlia.backend.source import get_source
+from mlia.backend.source import TarArchiveSource
+
+
+def test_create_destination_and_install(test_systems_path: Path, tmpdir: Any) -> None:
+ """Test create_destination_and_install function."""
+ system_directory = test_systems_path / "system1"
+
+ dir_source = DirectorySource(system_directory)
+ resources = Path(tmpdir)
+ create_destination_and_install(dir_source, resources)
+ assert (resources / "system1").is_dir()
+
+
+@patch("mlia.backend.source.DirectorySource.create_destination", return_value=False)
+def test_create_destination_and_install_if_dest_creation_not_required(
+ mock_ds_create_destination: Any, tmpdir: Any
+) -> None:
+ """Test create_destination_and_install when destination creation is not required."""
+ dir_source = DirectorySource(Path("unknown"))
+ resources = Path(tmpdir)
+ with pytest.raises(Exception):
+ create_destination_and_install(dir_source, resources)
+
+ mock_ds_create_destination.assert_called_once()
+
+
+def test_create_destination_and_install_if_installation_fails(tmpdir: Any) -> None:
+ """Test create_destination_and_install function if installation fails."""
+ dir_source = DirectorySource(Path("unknown"))
+ resources = Path(tmpdir)
+ with pytest.raises(Exception, match="Directory .* does not exist"):
+ create_destination_and_install(dir_source, resources)
+ assert not (resources / "unknown").exists()
+ assert resources.exists()
+
+
+def test_create_destination_and_install_if_name_is_empty() -> None:
+ """Test create_destination_and_install function fails if source name is empty."""
+ source = MagicMock()
+ source.create_destination.return_value = True
+ source.name.return_value = None
+
+ with pytest.raises(Exception, match="Unable to get source name"):
+ create_destination_and_install(source, Path("some_path"))
+
+ source.install_into.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "source_path, expected_class, expected_error",
+ [
+ (
+ Path("backends/applications/application1/"),
+ DirectorySource,
+ does_not_raise(),
+ ),
+ (
+ Path("archives/applications/application1.tar.gz"),
+ TarArchiveSource,
+ does_not_raise(),
+ ),
+ (
+ Path("doesnt/exist"),
+ None,
+ pytest.raises(
+ ConfigurationException, match="Unable to read .*doesnt/exist"
+ ),
+ ),
+ ],
+)
+def test_get_source(
+ source_path: Path,
+ expected_class: Any,
+ expected_error: Any,
+ test_resources_path: Path,
+) -> None:
+ """Test get_source function."""
+ with expected_error:
+ full_source_path = test_resources_path / source_path
+ source = get_source(full_source_path)
+ assert isinstance(source, expected_class)
+
+
+class TestDirectorySource:
+ """Test DirectorySource class."""
+
+ @pytest.mark.parametrize(
+ "directory, name",
+ [
+ (Path("/some/path/some_system"), "some_system"),
+ (Path("some_system"), "some_system"),
+ ],
+ )
+ def test_name(self, directory: Path, name: str) -> None:
+ """Test getting source name."""
+ assert DirectorySource(directory).name() == name
+
+ def test_install_into(self, test_systems_path: Path, tmpdir: Any) -> None:
+ """Test install directory into destination."""
+ system_directory = test_systems_path / "system1"
+
+ dir_source = DirectorySource(system_directory)
+ with pytest.raises(Exception, match="Wrong destination .*"):
+ dir_source.install_into(Path("unknown_destination"))
+
+ tmpdir_path = Path(tmpdir)
+ dir_source.install_into(tmpdir_path)
+ source_files = [f.name for f in system_directory.iterdir()]
+ dest_files = [f.name for f in tmpdir_path.iterdir()]
+ assert Counter(source_files) == Counter(dest_files)
+
+ def test_install_into_unknown_source_directory(self, tmpdir: Any) -> None:
+ """Test install system from unknown directory."""
+ with pytest.raises(Exception, match="Directory .* does not exist"):
+ DirectorySource(Path("unknown_directory")).install_into(Path(tmpdir))
+
+
+class TestTarArchiveSource:
+ """Test TarArchiveSource class."""
+
+ @pytest.mark.parametrize(
+ "archive, name",
+ [
+ (Path("some_archive.tgz"), "some_archive"),
+ (Path("some_archive.tar.gz"), "some_archive"),
+ (Path("some_archive"), "some_archive"),
+ ("archives/systems/system1.tar.gz", "system1"),
+ ("archives/systems/system1_dir.tar.gz", "system1"),
+ ],
+ )
+ def test_name(self, test_resources_path: Path, archive: Path, name: str) -> None:
+ """Test getting source name."""
+ assert TarArchiveSource(test_resources_path / archive).name() == name
+
+ def test_install_into(self, test_resources_path: Path, tmpdir: Any) -> None:
+ """Test install archive into destination."""
+ system_archive = test_resources_path / "archives/systems/system1.tar.gz"
+
+ tar_source = TarArchiveSource(system_archive)
+ with pytest.raises(Exception, match="Wrong destination .*"):
+ tar_source.install_into(Path("unknown_destination"))
+
+ tmpdir_path = Path(tmpdir)
+ tar_source.install_into(tmpdir_path)
+ source_files = [
+ "aiet-config.json.license",
+ "aiet-config.json",
+ "system_artifact",
+ ]
+ dest_files = [f.name for f in tmpdir_path.iterdir()]
+ assert Counter(source_files) == Counter(dest_files)
+
+ def test_install_into_unknown_source_archive(self, tmpdir: Any) -> None:
+ """Test install unknown source archive."""
+ with pytest.raises(Exception, match="File .* does not exist"):
+ TarArchiveSource(Path("unknown.tar.gz")).install_into(Path(tmpdir))
+
+ def test_install_into_unsupported_source_archive(self, tmpdir: Any) -> None:
+ """Test install unsupported file type."""
+ plain_text_file = Path(tmpdir) / "test_file"
+ plain_text_file.write_text("Not a system config")
+
+ with pytest.raises(Exception, match="Unsupported archive type .*"):
+ TarArchiveSource(plain_text_file).install_into(Path(tmpdir))
+
+ def test_lazy_property_init(self, test_resources_path: Path) -> None:
+ """Test that class properties initialized correctly."""
+ system_archive = test_resources_path / "archives/systems/system1.tar.gz"
+
+ tar_source = TarArchiveSource(system_archive)
+ assert tar_source.name() == "system1"
+ assert tar_source.config() is not None
+ assert tar_source.create_destination()
+
+ tar_source = TarArchiveSource(system_archive)
+ assert tar_source.config() is not None
+ assert tar_source.create_destination()
+ assert tar_source.name() == "system1"
+
+ def test_create_destination_property(self, test_resources_path: Path) -> None:
+ """Test create_destination property filled correctly for different archives."""
+ system_archive1 = test_resources_path / "archives/systems/system1.tar.gz"
+ system_archive2 = test_resources_path / "archives/systems/system1_dir.tar.gz"
+
+ assert TarArchiveSource(system_archive1).create_destination()
+ assert not TarArchiveSource(system_archive2).create_destination()
diff --git a/tests/mlia/test_backend_system.py b/tests/mlia/test_backend_system.py
new file mode 100644
index 0000000..21187ff
--- /dev/null
+++ b/tests/mlia/test_backend_system.py
@@ -0,0 +1,541 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for system backend."""
+from contextlib import ExitStack as does_not_raise
+from pathlib import Path
+from typing import Any
+from typing import Callable
+from typing import Dict
+from typing import List
+from typing import Optional
+from typing import Tuple
+from unittest.mock import MagicMock
+
+import pytest
+
+from mlia.backend.common import Command
+from mlia.backend.common import ConfigurationException
+from mlia.backend.common import Param
+from mlia.backend.common import UserParamConfig
+from mlia.backend.config import LocalProtocolConfig
+from mlia.backend.config import ProtocolConfig
+from mlia.backend.config import SSHConfig
+from mlia.backend.config import SystemConfig
+from mlia.backend.controller import SystemController
+from mlia.backend.controller import SystemControllerSingleInstance
+from mlia.backend.protocol import LocalProtocol
+from mlia.backend.protocol import SSHProtocol
+from mlia.backend.protocol import SupportsClose
+from mlia.backend.protocol import SupportsDeploy
+from mlia.backend.system import ControlledSystem
+from mlia.backend.system import get_available_systems
+from mlia.backend.system import get_controller
+from mlia.backend.system import get_system
+from mlia.backend.system import install_system
+from mlia.backend.system import load_system
+from mlia.backend.system import remove_system
+from mlia.backend.system import StandaloneSystem
+from mlia.backend.system import System
+
+
+def dummy_resolver(
+ values: Optional[Dict[str, str]] = None
+) -> Callable[[str, str, List[Tuple[Optional[str], Param]]], str]:
+ """Return dummy parameter resolver implementation."""
+ # pylint: disable=unused-argument
+ def resolver(
+ param: str, cmd: str, param_values: List[Tuple[Optional[str], Param]]
+ ) -> str:
+ """Implement dummy parameter resolver."""
+ return values.get(param, "") if values else ""
+
+ return resolver
+
+
+def test_get_available_systems() -> None:
+ """Test get_available_systems mocking get_resources."""
+ available_systems = get_available_systems()
+ assert all(isinstance(s, System) for s in available_systems)
+ assert len(available_systems) == 4
+ assert [str(s) for s in available_systems] == [
+ "System 1",
+ "System 2",
+ "System 4",
+ "System 6",
+ ]
+
+
+def test_get_system() -> None:
+ """Test get_system."""
+ system1 = get_system("System 1")
+ assert isinstance(system1, ControlledSystem)
+ assert system1.connectable is True
+ assert system1.connection_details() == ("localhost", 8021)
+ assert system1.name == "System 1"
+
+ system2 = get_system("System 2")
+ # check that comparison with object of another type returns false
+ assert system1 != 42
+ assert system1 != system2
+
+ system = get_system("Unknown system")
+ assert system is None
+
+
+@pytest.mark.parametrize(
+ "source, call_count, exception_type",
+ (
+ (
+ "archives/systems/system1.tar.gz",
+ 0,
+ pytest.raises(Exception, match="Systems .* are already installed"),
+ ),
+ (
+ "archives/systems/system3.tar.gz",
+ 0,
+ pytest.raises(Exception, match="Unable to read system definition"),
+ ),
+ (
+ "backends/systems/system1",
+ 0,
+ pytest.raises(Exception, match="Systems .* are already installed"),
+ ),
+ (
+ "backends/systems/system3",
+ 0,
+ pytest.raises(Exception, match="Unable to read system definition"),
+ ),
+ ("unknown_path", 0, pytest.raises(Exception, match="Unable to read")),
+ (
+ "various/systems/system_with_empty_config",
+ 0,
+ pytest.raises(Exception, match="No system definition found"),
+ ),
+ ("various/systems/system_with_valid_config", 1, does_not_raise()),
+ ),
+)
+def test_install_system(
+ monkeypatch: Any,
+ test_resources_path: Path,
+ source: str,
+ call_count: int,
+ exception_type: Any,
+) -> None:
+ """Test system installation from archive."""
+ mock_create_destination_and_install = MagicMock()
+ monkeypatch.setattr(
+ "mlia.backend.system.create_destination_and_install",
+ mock_create_destination_and_install,
+ )
+
+ with exception_type:
+ install_system(test_resources_path / source)
+
+ assert mock_create_destination_and_install.call_count == call_count
+
+
+def test_remove_system(monkeypatch: Any) -> None:
+ """Test system removal."""
+ mock_remove_backend = MagicMock()
+ monkeypatch.setattr("mlia.backend.system.remove_backend", mock_remove_backend)
+ remove_system("some_system_dir")
+ mock_remove_backend.assert_called_once()
+
+
+def test_system(monkeypatch: Any) -> None:
+ """Test the System class."""
+ config = SystemConfig(name="System 1")
+ monkeypatch.setattr("mlia.backend.system.ProtocolFactory", MagicMock())
+ system = System(config)
+ assert str(system) == "System 1"
+ assert system.name == "System 1"
+
+
+def test_system_with_empty_parameter_name() -> None:
+ """Test that configuration fails if parameter name is empty."""
+ bad_config = SystemConfig(
+ name="System 1",
+ commands={"run": ["run"]},
+ user_params={"run": [{"name": "", "values": ["1", "2", "3"]}]},
+ )
+ with pytest.raises(Exception, match="Parameter has an empty 'name' attribute."):
+ System(bad_config)
+
+
+def test_system_standalone_run() -> None:
+ """Test run operation for standalone system."""
+ system = get_system("System 4")
+ assert isinstance(system, StandaloneSystem)
+
+ with pytest.raises(
+ ConfigurationException, match="System .* does not support connections"
+ ):
+ system.connection_details()
+
+ with pytest.raises(
+ ConfigurationException, match="System .* does not support connections"
+ ):
+ system.establish_connection()
+
+ assert system.connectable is False
+
+ system.run("echo 'application run'")
+
+
+@pytest.mark.parametrize(
+ "system_name, expected_value", [("System 1", True), ("System 4", False)]
+)
+def test_system_supports_deploy(system_name: str, expected_value: bool) -> None:
+ """Test system property supports_deploy."""
+ system = get_system(system_name)
+ if system is None:
+ pytest.fail("Unable to get system {}".format(system_name))
+ assert system.supports_deploy == expected_value
+
+
+@pytest.mark.parametrize(
+ "mock_protocol",
+ [
+ MagicMock(spec=SSHProtocol),
+ MagicMock(
+ spec=SSHProtocol,
+ **{"close.side_effect": ValueError("Unable to close protocol")}
+ ),
+ MagicMock(spec=LocalProtocol),
+ ],
+)
+def test_system_start_and_stop(monkeypatch: Any, mock_protocol: MagicMock) -> None:
+ """Test system start, run commands and stop."""
+ monkeypatch.setattr(
+ "mlia.backend.system.ProtocolFactory.get_protocol",
+ MagicMock(return_value=mock_protocol),
+ )
+
+ system = get_system("System 1")
+ if system is None:
+ pytest.fail("Unable to get system")
+ assert isinstance(system, ControlledSystem)
+
+ with pytest.raises(Exception, match="System has not been started"):
+ system.stop()
+
+ assert not system.is_running()
+ assert system.get_output() == ("", "")
+ system.start(["sleep 10"], False)
+ assert system.is_running()
+ system.stop(wait=True)
+ assert not system.is_running()
+ assert system.get_output() == ("", "")
+
+ if isinstance(mock_protocol, SupportsClose):
+ mock_protocol.close.assert_called_once()
+
+ if isinstance(mock_protocol, SSHProtocol):
+ system.establish_connection()
+
+
+def test_system_start_no_config_location() -> None:
+ """Test that system without config location could not start."""
+ system = load_system(
+ SystemConfig(
+ name="test",
+ data_transfer=SSHConfig(
+ protocol="ssh",
+ username="user",
+ password="user",
+ hostname="localhost",
+ port="123",
+ ),
+ )
+ )
+
+ assert isinstance(system, ControlledSystem)
+ with pytest.raises(
+ ConfigurationException, match="System test has wrong config location"
+ ):
+ system.start(["sleep 100"])
+
+
+@pytest.mark.parametrize(
+ "config, expected_class, expected_error",
+ [
+ (
+ SystemConfig(
+ name="test",
+ data_transfer=SSHConfig(
+ protocol="ssh",
+ username="user",
+ password="user",
+ hostname="localhost",
+ port="123",
+ ),
+ ),
+ ControlledSystem,
+ does_not_raise(),
+ ),
+ (
+ SystemConfig(
+ name="test", data_transfer=LocalProtocolConfig(protocol="local")
+ ),
+ StandaloneSystem,
+ does_not_raise(),
+ ),
+ (
+ SystemConfig(
+ name="test",
+ data_transfer=ProtocolConfig(protocol="cool_protocol"), # type: ignore
+ ),
+ None,
+ pytest.raises(
+ Exception, match="Unsupported execution type for protocol cool_protocol"
+ ),
+ ),
+ ],
+)
+def test_load_system(
+ config: SystemConfig, expected_class: type, expected_error: Any
+) -> None:
+ """Test load_system function."""
+ if not expected_class:
+ with expected_error:
+ load_system(config)
+ else:
+ system = load_system(config)
+ assert isinstance(system, expected_class)
+
+
+def test_load_system_populate_shared_params() -> None:
+ """Test shared parameters population."""
+ with pytest.raises(Exception, match="All shared parameters should have aliases"):
+ load_system(
+ SystemConfig(
+ name="test_system",
+ data_transfer=LocalProtocolConfig(protocol="local"),
+ user_params={
+ "shared": [
+ UserParamConfig(
+ name="--shared_param1",
+ description="Shared parameter",
+ values=["1", "2", "3"],
+ default_value="1",
+ )
+ ]
+ },
+ )
+ )
+
+ with pytest.raises(
+ Exception, match="All parameters for command run should have aliases"
+ ):
+ load_system(
+ SystemConfig(
+ name="test_system",
+ data_transfer=LocalProtocolConfig(protocol="local"),
+ user_params={
+ "shared": [
+ UserParamConfig(
+ name="--shared_param1",
+ description="Shared parameter",
+ values=["1", "2", "3"],
+ default_value="1",
+ alias="shared_param1",
+ )
+ ],
+ "run": [
+ UserParamConfig(
+ name="--run_param1",
+ description="Run specific parameter",
+ values=["1", "2", "3"],
+ default_value="2",
+ )
+ ],
+ },
+ )
+ )
+ system0 = load_system(
+ SystemConfig(
+ name="test_system",
+ data_transfer=LocalProtocolConfig(protocol="local"),
+ commands={"run": ["run_command"]},
+ user_params={
+ "shared": [],
+ "run": [
+ UserParamConfig(
+ name="--run_param1",
+ description="Run specific parameter",
+ values=["1", "2", "3"],
+ default_value="2",
+ alias="run_param1",
+ )
+ ],
+ },
+ )
+ )
+ assert len(system0.commands) == 1
+ run_command1 = system0.commands["run"]
+ assert run_command1 == Command(
+ ["run_command"],
+ [
+ Param(
+ "--run_param1",
+ "Run specific parameter",
+ ["1", "2", "3"],
+ "2",
+ "run_param1",
+ )
+ ],
+ )
+
+ system1 = load_system(
+ SystemConfig(
+ name="test_system",
+ data_transfer=LocalProtocolConfig(protocol="local"),
+ user_params={
+ "shared": [
+ UserParamConfig(
+ name="--shared_param1",
+ description="Shared parameter",
+ values=["1", "2", "3"],
+ default_value="1",
+ alias="shared_param1",
+ )
+ ],
+ "run": [
+ UserParamConfig(
+ name="--run_param1",
+ description="Run specific parameter",
+ values=["1", "2", "3"],
+ default_value="2",
+ alias="run_param1",
+ )
+ ],
+ },
+ )
+ )
+ assert len(system1.commands) == 2
+ build_command1 = system1.commands["build"]
+ assert build_command1 == Command(
+ [],
+ [
+ Param(
+ "--shared_param1",
+ "Shared parameter",
+ ["1", "2", "3"],
+ "1",
+ "shared_param1",
+ )
+ ],
+ )
+
+ run_command1 = system1.commands["run"]
+ assert run_command1 == Command(
+ [],
+ [
+ Param(
+ "--shared_param1",
+ "Shared parameter",
+ ["1", "2", "3"],
+ "1",
+ "shared_param1",
+ ),
+ Param(
+ "--run_param1",
+ "Run specific parameter",
+ ["1", "2", "3"],
+ "2",
+ "run_param1",
+ ),
+ ],
+ )
+
+ system2 = load_system(
+ SystemConfig(
+ name="test_system",
+ data_transfer=LocalProtocolConfig(protocol="local"),
+ commands={"build": ["build_command"]},
+ user_params={
+ "shared": [
+ UserParamConfig(
+ name="--shared_param1",
+ description="Shared parameter",
+ values=["1", "2", "3"],
+ default_value="1",
+ alias="shared_param1",
+ )
+ ],
+ "run": [
+ UserParamConfig(
+ name="--run_param1",
+ description="Run specific parameter",
+ values=["1", "2", "3"],
+ default_value="2",
+ alias="run_param1",
+ )
+ ],
+ },
+ )
+ )
+ assert len(system2.commands) == 2
+ build_command2 = system2.commands["build"]
+ assert build_command2 == Command(
+ ["build_command"],
+ [
+ Param(
+ "--shared_param1",
+ "Shared parameter",
+ ["1", "2", "3"],
+ "1",
+ "shared_param1",
+ )
+ ],
+ )
+
+ run_command2 = system1.commands["run"]
+ assert run_command2 == Command(
+ [],
+ [
+ Param(
+ "--shared_param1",
+ "Shared parameter",
+ ["1", "2", "3"],
+ "1",
+ "shared_param1",
+ ),
+ Param(
+ "--run_param1",
+ "Run specific parameter",
+ ["1", "2", "3"],
+ "2",
+ "run_param1",
+ ),
+ ],
+ )
+
+
+@pytest.mark.parametrize(
+ "mock_protocol, expected_call_count",
+ [(MagicMock(spec=SupportsDeploy), 1), (MagicMock(), 0)],
+)
+def test_system_deploy_data(
+ monkeypatch: Any, mock_protocol: MagicMock, expected_call_count: int
+) -> None:
+ """Test deploy data functionality."""
+ monkeypatch.setattr(
+ "mlia.backend.system.ProtocolFactory.get_protocol",
+ MagicMock(return_value=mock_protocol),
+ )
+
+ system = ControlledSystem(SystemConfig(name="test"))
+ system.deploy(Path("some_file"), "some_dest")
+
+ assert mock_protocol.deploy.call_count == expected_call_count
+
+
+@pytest.mark.parametrize(
+ "single_instance, controller_class",
+ ((False, SystemController), (True, SystemControllerSingleInstance)),
+)
+def test_get_controller(single_instance: bool, controller_class: type) -> None:
+ """Test function get_controller."""
+ controller = get_controller(single_instance)
+ assert isinstance(controller, controller_class)
diff --git a/tests/mlia/test_cli_logging.py b/tests/mlia/test_cli_logging.py
index 7c5f299..3f59cb6 100644
--- a/tests/mlia/test_cli_logging.py
+++ b/tests/mlia/test_cli_logging.py
@@ -32,7 +32,7 @@ def teardown_function() -> None:
(
None,
True,
- """mlia.tools.aiet_wrapper - aiet debug
+ """mlia.backend.manager - backends debug
cli info
mlia.cli - cli debug
""",
@@ -41,11 +41,11 @@ mlia.cli - cli debug
(
"logs",
True,
- """mlia.tools.aiet_wrapper - aiet debug
+ """mlia.backend.manager - backends debug
cli info
mlia.cli - cli debug
""",
- """mlia.tools.aiet_wrapper - DEBUG - aiet debug
+ """mlia.backend.manager - DEBUG - backends debug
mlia.cli - DEBUG - cli debug
""",
),
@@ -64,8 +64,8 @@ def test_setup_logging(
setup_logging(logs_dir_path, verbose)
- aiet_logger = logging.getLogger("mlia.tools.aiet_wrapper")
- aiet_logger.debug("aiet debug")
+ backend_logger = logging.getLogger("mlia.backend.manager")
+ backend_logger.debug("backends debug")
cli_logger = logging.getLogger("mlia.cli")
cli_logger.info("cli info")
diff --git a/tests/mlia/test_devices_ethosu_performance.py b/tests/mlia/test_devices_ethosu_performance.py
index e27efa0..b3e5298 100644
--- a/tests/mlia/test_devices_ethosu_performance.py
+++ b/tests/mlia/test_devices_ethosu_performance.py
@@ -23,6 +23,6 @@ def test_memory_usage_conversion() -> None:
def mock_performance_estimation(monkeypatch: pytest.MonkeyPatch) -> None:
"""Mock performance estimation."""
monkeypatch.setattr(
- "mlia.tools.aiet_wrapper.estimate_performance",
+ "mlia.backend.manager.estimate_performance",
MagicMock(return_value=MagicMock()),
)
diff --git a/tests/mlia/test_resources/application_config.json b/tests/mlia/test_resources/application_config.json
new file mode 100644
index 0000000..2dfcfec
--- /dev/null
+++ b/tests/mlia/test_resources/application_config.json
@@ -0,0 +1,96 @@
+[
+ {
+ "name": "application_1",
+ "description": "application number one",
+ "supported_systems": [
+ "system_1",
+ "system_2"
+ ],
+ "build_dir": "build_dir_11",
+ "commands": {
+ "clean": [
+ "clean_cmd_11"
+ ],
+ "build": [
+ "build_cmd_11"
+ ],
+ "run": [
+ "run_cmd_11"
+ ],
+ "post_run": [
+ "post_run_cmd_11"
+ ]
+ },
+ "user_params": {
+ "run": [
+ {
+ "name": "run_param_11",
+ "values": [],
+ "description": "run param number one"
+ }
+ ],
+ "build": [
+ {
+ "name": "build_param_11",
+ "values": [],
+ "description": "build param number one"
+ },
+ {
+ "name": "build_param_12",
+ "values": [],
+ "description": "build param number two"
+ },
+ {
+ "name": "build_param_13",
+ "values": [
+ "value_1"
+ ],
+ "description": "build param number three with some value"
+ }
+ ]
+ }
+ },
+ {
+ "name": "application_2",
+ "description": "application number two",
+ "supported_systems": [
+ "system_2"
+ ],
+ "build_dir": "build_dir_21",
+ "commands": {
+ "clean": [
+ "clean_cmd_21"
+ ],
+ "build": [
+ "build_cmd_21",
+ "build_cmd_22"
+ ],
+ "run": [
+ "run_cmd_21"
+ ],
+ "post_run": [
+ "post_run_cmd_21"
+ ]
+ },
+ "user_params": {
+ "build": [
+ {
+ "name": "build_param_21",
+ "values": [],
+ "description": "build param number one"
+ },
+ {
+ "name": "build_param_22",
+ "values": [],
+ "description": "build param number two"
+ },
+ {
+ "name": "build_param_23",
+ "values": [],
+ "description": "build param number three"
+ }
+ ],
+ "run": []
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/application_config.json.license b/tests/mlia/test_resources/application_config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/application_config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/backends/applications/application1/aiet-config.json b/tests/mlia/test_resources/backends/applications/application1/aiet-config.json
new file mode 100644
index 0000000..97f0401
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/application1/aiet-config.json
@@ -0,0 +1,30 @@
+[
+ {
+ "name": "application_1",
+ "description": "This is application 1",
+ "supported_systems": [
+ {
+ "name": "System 1"
+ }
+ ],
+ "build_dir": "build",
+ "commands": {
+ "clean": [
+ "echo 'clean'"
+ ],
+ "build": [
+ "echo 'build'"
+ ],
+ "run": [
+ "echo 'run'"
+ ],
+ "post_run": [
+ "echo 'post_run'"
+ ]
+ },
+ "user_params": {
+ "build": [],
+ "run": []
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/backends/applications/application1/aiet-config.json.license b/tests/mlia/test_resources/backends/applications/application1/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/application1/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/backends/applications/application2/aiet-config.json b/tests/mlia/test_resources/backends/applications/application2/aiet-config.json
new file mode 100644
index 0000000..e9122d3
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/application2/aiet-config.json
@@ -0,0 +1,30 @@
+[
+ {
+ "name": "application_2",
+ "description": "This is application 2",
+ "supported_systems": [
+ {
+ "name": "System 2"
+ }
+ ],
+ "build_dir": "build",
+ "commands": {
+ "clean": [
+ "echo 'clean'"
+ ],
+ "build": [
+ "echo 'build'"
+ ],
+ "run": [
+ "echo 'run'"
+ ],
+ "post_run": [
+ "echo 'post_run'"
+ ]
+ },
+ "user_params": {
+ "build": [],
+ "run": []
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/backends/applications/application2/aiet-config.json.license b/tests/mlia/test_resources/backends/applications/application2/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/application2/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/backends/applications/application3/readme.txt b/tests/mlia/test_resources/backends/applications/application3/readme.txt
new file mode 100644
index 0000000..8c72c05
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/application3/readme.txt
@@ -0,0 +1,4 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+SPDX-License-Identifier: Apache-2.0
+
+This application does not have json configuration file
diff --git a/tests/mlia/test_resources/backends/applications/application4/aiet-config.json b/tests/mlia/test_resources/backends/applications/application4/aiet-config.json
new file mode 100644
index 0000000..ffb5746
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/application4/aiet-config.json
@@ -0,0 +1,36 @@
+[
+ {
+ "name": "application_4",
+ "description": "This is application 4",
+ "build_dir": "build",
+ "supported_systems": [
+ {
+ "name": "System 4"
+ }
+ ],
+ "commands": {
+ "build": [
+ "cp ../hello_app.txt .",
+ "echo '{user_params:0}' > params.txt"
+ ],
+ "run": [
+ "cat {application.build_dir}/hello_app.txt"
+ ]
+ },
+ "user_params": {
+ "build": [
+ {
+ "name": "--app",
+ "description": "Sample command param",
+ "values": [
+ "application1",
+ "application2",
+ "application3"
+ ],
+ "default_value": "application1"
+ }
+ ],
+ "run": []
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/backends/applications/application4/aiet-config.json.license b/tests/mlia/test_resources/backends/applications/application4/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/application4/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/backends/applications/application4/hello_app.txt b/tests/mlia/test_resources/backends/applications/application4/hello_app.txt
new file mode 100644
index 0000000..2ec0d1d
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/application4/hello_app.txt
@@ -0,0 +1,4 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+SPDX-License-Identifier: Apache-2.0
+
+Hello from APP!
diff --git a/tests/mlia/test_resources/backends/applications/application5/aiet-config.json b/tests/mlia/test_resources/backends/applications/application5/aiet-config.json
new file mode 100644
index 0000000..5269409
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/application5/aiet-config.json
@@ -0,0 +1,160 @@
+[
+ {
+ "name": "application_5",
+ "description": "This is application 5",
+ "build_dir": "default_build_dir",
+ "supported_systems": [
+ {
+ "name": "System 1",
+ "lock": false
+ },
+ {
+ "name": "System 2"
+ }
+ ],
+ "variables": {
+ "var1": "value1",
+ "var2": "value2"
+ },
+ "lock": true,
+ "commands": {
+ "build": [
+ "default build command"
+ ],
+ "run": [
+ "default run command"
+ ]
+ },
+ "user_params": {
+ "build": [],
+ "run": []
+ }
+ },
+ {
+ "name": "application_5A",
+ "description": "This is application 5A",
+ "supported_systems": [
+ {
+ "name": "System 1",
+ "build_dir": "build_5A",
+ "variables": {
+ "var1": "new value1"
+ }
+ },
+ {
+ "name": "System 2",
+ "variables": {
+ "var2": "new value2"
+ },
+ "lock": true,
+ "commands": {
+ "run": [
+ "run command on system 2"
+ ]
+ }
+ }
+ ],
+ "variables": {
+ "var1": "value1",
+ "var2": "value2"
+ },
+ "build_dir": "build",
+ "commands": {
+ "build": [
+ "default build command"
+ ],
+ "run": [
+ "default run command"
+ ]
+ },
+ "user_params": {
+ "build": [],
+ "run": []
+ }
+ },
+ {
+ "name": "application_5B",
+ "description": "This is application 5B",
+ "supported_systems": [
+ {
+ "name": "System 1",
+ "build_dir": "build_5B",
+ "variables": {
+ "var1": "value for var1 System1",
+ "var2": "value for var2 System1"
+ },
+ "user_params": {
+ "build": [
+ {
+ "name": "--param_5B",
+ "description": "Sample command param",
+ "values": [
+ "value1",
+ "value2",
+ "value3"
+ ],
+ "default_value": "value1",
+ "alias": "param1"
+ }
+ ]
+ }
+ },
+ {
+ "name": "System 2",
+ "variables": {
+ "var1": "value for var1 System2",
+ "var2": "value for var2 System2"
+ },
+ "commands": {
+ "build": [
+ "build command on system 2 with {variables:var1} {user_params:param1}"
+ ],
+ "run": [
+ "run command on system 2"
+ ]
+ },
+ "user_params": {
+ "run": []
+ }
+ }
+ ],
+ "build_dir": "build",
+ "commands": {
+ "build": [
+ "default build command with {variables:var1}"
+ ],
+ "run": [
+ "default run command with {variables:var2}"
+ ]
+ },
+ "user_params": {
+ "build": [
+ {
+ "name": "--param",
+ "description": "Sample command param",
+ "values": [
+ "value1",
+ "value2",
+ "value3"
+ ],
+ "default_value": "value1",
+ "alias": "param1"
+ }
+ ],
+ "run": [],
+ "non_used_command": [
+ {
+ "name": "--not-used",
+ "description": "Not used param anywhere",
+ "values": [
+ "value1",
+ "value2",
+ "value3"
+ ],
+ "default_value": "value1",
+ "alias": "param1"
+ }
+ ]
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/backends/applications/application5/aiet-config.json.license b/tests/mlia/test_resources/backends/applications/application5/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/application5/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/backends/applications/application6/aiet-config.json b/tests/mlia/test_resources/backends/applications/application6/aiet-config.json
new file mode 100644
index 0000000..56ad807
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/application6/aiet-config.json
@@ -0,0 +1,42 @@
+[
+ {
+ "name": "application_6",
+ "description": "This is application 6",
+ "supported_systems": [
+ {
+ "name": "System 6"
+ }
+ ],
+ "build_dir": "build",
+ "commands": {
+ "clean": [
+ "echo 'clean'"
+ ],
+ "build": [
+ "echo 'build'"
+ ],
+ "run": [
+ "echo 'run {user_params:param1}'"
+ ],
+ "post_run": [
+ "echo 'post_run'"
+ ]
+ },
+ "user_params": {
+ "build": [],
+ "run": [
+ {
+ "name": "--param1",
+ "description": "Test parameter",
+ "values": [
+ "value1",
+ "value2",
+ "value3"
+ ],
+ "default_value": "value3",
+ "alias": "param1"
+ }
+ ]
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/backends/applications/application6/aiet-config.json.license b/tests/mlia/test_resources/backends/applications/application6/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/application6/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/backends/applications/readme.txt b/tests/mlia/test_resources/backends/applications/readme.txt
new file mode 100644
index 0000000..a1f8209
--- /dev/null
+++ b/tests/mlia/test_resources/backends/applications/readme.txt
@@ -0,0 +1,4 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+SPDX-License-Identifier: Apache-2.0
+
+Dummy file for test purposes
diff --git a/tests/mlia/test_resources/backends/systems/system1/aiet-config.json b/tests/mlia/test_resources/backends/systems/system1/aiet-config.json
new file mode 100644
index 0000000..4b5dd19
--- /dev/null
+++ b/tests/mlia/test_resources/backends/systems/system1/aiet-config.json
@@ -0,0 +1,35 @@
+[
+ {
+ "name": "System 1",
+ "description": "This is system 1",
+ "build_dir": "build",
+ "data_transfer": {
+ "protocol": "ssh",
+ "username": "root",
+ "password": "root",
+ "hostname": "localhost",
+ "port": "8021"
+ },
+ "commands": {
+ "clean": [
+ "echo 'clean'"
+ ],
+ "build": [
+ "echo 'build'"
+ ],
+ "run": [
+ "echo 'run'"
+ ],
+ "post_run": [
+ "echo 'post_run'"
+ ],
+ "deploy": [
+ "echo 'deploy'"
+ ]
+ },
+ "user_params": {
+ "build": [],
+ "run": []
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/backends/systems/system1/aiet-config.json.license b/tests/mlia/test_resources/backends/systems/system1/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/backends/systems/system1/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/backends/systems/system1/system_artifact/dummy.txt b/tests/mlia/test_resources/backends/systems/system1/system_artifact/dummy.txt
new file mode 100644
index 0000000..487e9d8
--- /dev/null
+++ b/tests/mlia/test_resources/backends/systems/system1/system_artifact/dummy.txt
@@ -0,0 +1,2 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/backends/systems/system2/aiet-config.json b/tests/mlia/test_resources/backends/systems/system2/aiet-config.json
new file mode 100644
index 0000000..a9e0eb3
--- /dev/null
+++ b/tests/mlia/test_resources/backends/systems/system2/aiet-config.json
@@ -0,0 +1,32 @@
+[
+ {
+ "name": "System 2",
+ "description": "This is system 2",
+ "build_dir": "build",
+ "data_transfer": {
+ "protocol": "ssh",
+ "username": "root",
+ "password": "root",
+ "hostname": "localhost",
+ "port": "8021"
+ },
+ "commands": {
+ "clean": [
+ "echo 'clean'"
+ ],
+ "build": [
+ "echo 'build'"
+ ],
+ "run": [
+ "echo 'run'"
+ ],
+ "post_run": [
+ "echo 'post_run'"
+ ]
+ },
+ "user_params": {
+ "build": [],
+ "run": []
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/backends/systems/system2/aiet-config.json.license b/tests/mlia/test_resources/backends/systems/system2/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/backends/systems/system2/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/backends/systems/system3/readme.txt b/tests/mlia/test_resources/backends/systems/system3/readme.txt
new file mode 100644
index 0000000..aba5a9c
--- /dev/null
+++ b/tests/mlia/test_resources/backends/systems/system3/readme.txt
@@ -0,0 +1,4 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+SPDX-License-Identifier: Apache-2.0
+
+This system does not have the json configuration file
diff --git a/tests/mlia/test_resources/backends/systems/system4/aiet-config.json b/tests/mlia/test_resources/backends/systems/system4/aiet-config.json
new file mode 100644
index 0000000..7b13160
--- /dev/null
+++ b/tests/mlia/test_resources/backends/systems/system4/aiet-config.json
@@ -0,0 +1,19 @@
+[
+ {
+ "name": "System 4",
+ "description": "This is system 4",
+ "build_dir": "build",
+ "data_transfer": {
+ "protocol": "local"
+ },
+ "commands": {
+ "run": [
+ "echo {application.name}",
+ "{application.commands.run:0}"
+ ]
+ },
+ "user_params": {
+ "run": []
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/backends/systems/system4/aiet-config.json.license b/tests/mlia/test_resources/backends/systems/system4/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/backends/systems/system4/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/backends/systems/system6/aiet-config.json b/tests/mlia/test_resources/backends/systems/system6/aiet-config.json
new file mode 100644
index 0000000..4242f64
--- /dev/null
+++ b/tests/mlia/test_resources/backends/systems/system6/aiet-config.json
@@ -0,0 +1,34 @@
+[
+ {
+ "name": "System 6",
+ "description": "This is system 6",
+ "build_dir": "build",
+ "data_transfer": {
+ "protocol": "local"
+ },
+ "variables": {
+ "var1": "{user_params:sys-param1}"
+ },
+ "commands": {
+ "run": [
+ "echo {application.name}",
+ "{application.commands.run:0}"
+ ]
+ },
+ "user_params": {
+ "run": [
+ {
+ "name": "--sys-param1",
+ "description": "Test parameter",
+ "values": [
+ "value1",
+ "value2",
+ "value3"
+ ],
+ "default_value": "value1",
+ "alias": "sys-param1"
+ }
+ ]
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/backends/systems/system6/aiet-config.json.license b/tests/mlia/test_resources/backends/systems/system6/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/backends/systems/system6/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/hello_world.json b/tests/mlia/test_resources/hello_world.json
new file mode 100644
index 0000000..8a9a448
--- /dev/null
+++ b/tests/mlia/test_resources/hello_world.json
@@ -0,0 +1,54 @@
+[
+ {
+ "name": "Hello world",
+ "description": "Dummy application that displays 'Hello world!'",
+ "supported_systems": [
+ "Dummy System"
+ ],
+ "build_dir": "build",
+ "deploy_data": [
+ [
+ "src",
+ "/tmp/"
+ ],
+ [
+ "README",
+ "/tmp/README.md"
+ ]
+ ],
+ "commands": {
+ "clean": [],
+ "build": [],
+ "run": [
+ "echo 'Hello world!'",
+ "ls -l /tmp"
+ ],
+ "post_run": []
+ },
+ "user_params": {
+ "run": [
+ {
+ "name": "--choice-param",
+ "values": [
+ "dummy_value_1",
+ "dummy_value_2"
+ ],
+ "default_value": "dummy_value_1",
+ "description": "Choice param"
+ },
+ {
+ "name": "--open-param",
+ "values": [],
+ "default_value": "dummy_value_4",
+ "description": "Open param"
+ },
+ {
+ "name": "--enable-flag",
+ "default_value": "dummy_value_4",
+ "description": "Flag param"
+ }
+ ],
+ "build": []
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/hello_world.json.license b/tests/mlia/test_resources/hello_world.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/hello_world.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/scripts/test_backend_run b/tests/mlia/test_resources/scripts/test_backend_run
new file mode 100755
index 0000000..548f577
--- /dev/null
+++ b/tests/mlia/test_resources/scripts/test_backend_run
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+
+echo "Hello from script"
+>&2 echo "Oops!"
+sleep 100
diff --git a/tests/mlia/test_resources/scripts/test_backend_run_script.sh b/tests/mlia/test_resources/scripts/test_backend_run_script.sh
new file mode 100644
index 0000000..548f577
--- /dev/null
+++ b/tests/mlia/test_resources/scripts/test_backend_run_script.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+
+echo "Hello from script"
+>&2 echo "Oops!"
+sleep 100
diff --git a/tests/mlia/test_resources/various/applications/application_with_empty_config/aiet-config.json b/tests/mlia/test_resources/various/applications/application_with_empty_config/aiet-config.json
new file mode 100644
index 0000000..fe51488
--- /dev/null
+++ b/tests/mlia/test_resources/various/applications/application_with_empty_config/aiet-config.json
@@ -0,0 +1 @@
+[]
diff --git a/tests/mlia/test_resources/various/applications/application_with_empty_config/aiet-config.json.license b/tests/mlia/test_resources/various/applications/application_with_empty_config/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/various/applications/application_with_empty_config/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/various/applications/application_with_valid_config/aiet-config.json b/tests/mlia/test_resources/various/applications/application_with_valid_config/aiet-config.json
new file mode 100644
index 0000000..ff1cf1a
--- /dev/null
+++ b/tests/mlia/test_resources/various/applications/application_with_valid_config/aiet-config.json
@@ -0,0 +1,35 @@
+[
+ {
+ "name": "test_application",
+ "description": "This is test_application",
+ "build_dir": "build",
+ "supported_systems": [
+ {
+ "name": "System 4"
+ }
+ ],
+ "commands": {
+ "build": [
+ "cp ../hello_app.txt ."
+ ],
+ "run": [
+ "{application.build_dir}/hello_app.txt"
+ ]
+ },
+ "user_params": {
+ "build": [
+ {
+ "name": "--app",
+ "description": "Sample command param",
+ "values": [
+ "application1",
+ "application2",
+ "application3"
+ ],
+ "default_value": "application1"
+ }
+ ],
+ "run": []
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/various/applications/application_with_valid_config/aiet-config.json.license b/tests/mlia/test_resources/various/applications/application_with_valid_config/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/various/applications/application_with_valid_config/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/various/applications/application_with_wrong_config1/aiet-config.json b/tests/mlia/test_resources/various/applications/application_with_wrong_config1/aiet-config.json
new file mode 100644
index 0000000..724b31b
--- /dev/null
+++ b/tests/mlia/test_resources/various/applications/application_with_wrong_config1/aiet-config.json
@@ -0,0 +1,2 @@
+This is not valid json file
+{
diff --git a/tests/mlia/test_resources/various/applications/application_with_wrong_config1/aiet-config.json.license b/tests/mlia/test_resources/various/applications/application_with_wrong_config1/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/various/applications/application_with_wrong_config1/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/various/applications/application_with_wrong_config2/aiet-config.json b/tests/mlia/test_resources/various/applications/application_with_wrong_config2/aiet-config.json
new file mode 100644
index 0000000..1ebb29c
--- /dev/null
+++ b/tests/mlia/test_resources/various/applications/application_with_wrong_config2/aiet-config.json
@@ -0,0 +1,30 @@
+[
+ {
+ "name": "test_application",
+ "description": "This is test_application",
+ "build_dir": "build",
+ "commands": {
+ "build": [
+ "cp ../hello_app.txt ."
+ ],
+ "run": [
+ "{application.build_dir}/hello_app.txt"
+ ]
+ },
+ "user_params": {
+ "build": [
+ {
+ "name": "--app",
+ "description": "Sample command param",
+ "values": [
+ "application1",
+ "application2",
+ "application3"
+ ],
+ "default_value": "application1"
+ }
+ ],
+ "run": []
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/various/applications/application_with_wrong_config2/aiet-config.json.license b/tests/mlia/test_resources/various/applications/application_with_wrong_config2/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/various/applications/application_with_wrong_config2/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/various/applications/application_with_wrong_config3/aiet-config.json b/tests/mlia/test_resources/various/applications/application_with_wrong_config3/aiet-config.json
new file mode 100644
index 0000000..410d12d
--- /dev/null
+++ b/tests/mlia/test_resources/various/applications/application_with_wrong_config3/aiet-config.json
@@ -0,0 +1,35 @@
+[
+ {
+ "name": "test_application",
+ "description": "This is test_application",
+ "build_dir": "build",
+ "supported_systems": [
+ {
+ "anme": "System 4"
+ }
+ ],
+ "commands": {
+ "build": [
+ "cp ../hello_app.txt ."
+ ],
+ "run": [
+ "{application.build_dir}/hello_app.txt"
+ ]
+ },
+ "user_params": {
+ "build": [
+ {
+ "name": "--app",
+ "description": "Sample command param",
+ "values": [
+ "application1",
+ "application2",
+ "application3"
+ ],
+ "default_value": "application1"
+ }
+ ],
+ "run": []
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/various/applications/application_with_wrong_config3/aiet-config.json.license b/tests/mlia/test_resources/various/applications/application_with_wrong_config3/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/various/applications/application_with_wrong_config3/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/various/systems/system_with_empty_config/aiet-config.json b/tests/mlia/test_resources/various/systems/system_with_empty_config/aiet-config.json
new file mode 100644
index 0000000..fe51488
--- /dev/null
+++ b/tests/mlia/test_resources/various/systems/system_with_empty_config/aiet-config.json
@@ -0,0 +1 @@
+[]
diff --git a/tests/mlia/test_resources/various/systems/system_with_empty_config/aiet-config.json.license b/tests/mlia/test_resources/various/systems/system_with_empty_config/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/various/systems/system_with_empty_config/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_resources/various/systems/system_with_valid_config/aiet-config.json b/tests/mlia/test_resources/various/systems/system_with_valid_config/aiet-config.json
new file mode 100644
index 0000000..20142e9
--- /dev/null
+++ b/tests/mlia/test_resources/various/systems/system_with_valid_config/aiet-config.json
@@ -0,0 +1,16 @@
+[
+ {
+ "name": "Test system",
+ "description": "This is a test system",
+ "build_dir": "build",
+ "data_transfer": {
+ "protocol": "local"
+ },
+ "commands": {
+ "run": []
+ },
+ "user_params": {
+ "run": []
+ }
+ }
+]
diff --git a/tests/mlia/test_resources/various/systems/system_with_valid_config/aiet-config.json.license b/tests/mlia/test_resources/various/systems/system_with_valid_config/aiet-config.json.license
new file mode 100644
index 0000000..9b83bfc
--- /dev/null
+++ b/tests/mlia/test_resources/various/systems/system_with_valid_config/aiet-config.json.license
@@ -0,0 +1,3 @@
+SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+
+SPDX-License-Identifier: Apache-2.0
diff --git a/tests/mlia/test_tools_metadata_corstone.py b/tests/mlia/test_tools_metadata_corstone.py
index 2ce3610..017d0c7 100644
--- a/tests/mlia/test_tools_metadata_corstone.py
+++ b/tests/mlia/test_tools_metadata_corstone.py
@@ -9,13 +9,13 @@ from unittest.mock import MagicMock
import pytest
-from mlia.tools.aiet_wrapper import AIETRunner
+from mlia.backend.manager import BackendRunner
from mlia.tools.metadata.common import DownloadAndInstall
from mlia.tools.metadata.common import InstallFromPath
-from mlia.tools.metadata.corstone import AIETBasedInstallation
-from mlia.tools.metadata.corstone import AIETMetadata
from mlia.tools.metadata.corstone import BackendInfo
+from mlia.tools.metadata.corstone import BackendInstallation
from mlia.tools.metadata.corstone import BackendInstaller
+from mlia.tools.metadata.corstone import BackendMetadata
from mlia.tools.metadata.corstone import CompoundPathChecker
from mlia.tools.metadata.corstone import Corstone300Installer
from mlia.tools.metadata.corstone import get_corstone_installations
@@ -40,8 +40,8 @@ def fixture_test_mlia_resources(
return mlia_resources
-def get_aiet_based_installation( # pylint: disable=too-many-arguments
- aiet_runner_mock: MagicMock = MagicMock(),
+def get_backend_installation( # pylint: disable=too-many-arguments
+ backend_runner_mock: MagicMock = MagicMock(),
name: str = "test_name",
description: str = "test_description",
download_artifact: Optional[MagicMock] = None,
@@ -50,11 +50,11 @@ def get_aiet_based_installation( # pylint: disable=too-many-arguments
system_config: Optional[str] = None,
backend_installer: BackendInstaller = MagicMock(),
supported_platforms: Optional[List[str]] = None,
-) -> AIETBasedInstallation:
- """Get AIET based installation."""
- return AIETBasedInstallation(
- aiet_runner=aiet_runner_mock,
- metadata=AIETMetadata(
+) -> BackendInstallation:
+ """Get backend installation."""
+ return BackendInstallation(
+ backend_runner=backend_runner_mock,
+ metadata=BackendMetadata(
name=name,
description=description,
system_config=system_config or "",
@@ -90,10 +90,10 @@ def test_could_be_installed_depends_on_platform(
monkeypatch.setattr(
"mlia.tools.metadata.corstone.all_paths_valid", MagicMock(return_value=True)
)
- aiet_runner_mock = MagicMock(spec=AIETRunner)
+ backend_runner_mock = MagicMock(spec=BackendRunner)
- installation = get_aiet_based_installation(
- aiet_runner_mock,
+ installation = get_backend_installation(
+ backend_runner_mock,
supported_platforms=supported_platforms,
)
assert installation.could_be_installed == expected_result
@@ -103,53 +103,53 @@ def test_get_corstone_installations() -> None:
"""Test function get_corstone_installation."""
installs = get_corstone_installations()
assert len(installs) == 2
- assert all(isinstance(install, AIETBasedInstallation) for install in installs)
+ assert all(isinstance(install, BackendInstallation) for install in installs)
-def test_aiet_based_installation_metadata_resolving() -> None:
- """Test AIET based installation metadata resolving."""
- aiet_runner_mock = MagicMock(spec=AIETRunner)
- installation = get_aiet_based_installation(aiet_runner_mock)
+def test_backend_installation_metadata_resolving() -> None:
+ """Test backend installation metadata resolving."""
+ backend_runner_mock = MagicMock(spec=BackendRunner)
+ installation = get_backend_installation(backend_runner_mock)
assert installation.name == "test_name"
assert installation.description == "test_description"
- aiet_runner_mock.all_installed.return_value = False
+ backend_runner_mock.all_installed.return_value = False
assert installation.already_installed is False
assert installation.could_be_installed is True
-def test_aiet_based_installation_supported_install_types(tmp_path: Path) -> None:
+def test_backend_installation_supported_install_types(tmp_path: Path) -> None:
"""Test supported installation types."""
- installation_no_download_artifact = get_aiet_based_installation()
+ installation_no_download_artifact = get_backend_installation()
assert installation_no_download_artifact.supports(DownloadAndInstall()) is False
- installation_with_download_artifact = get_aiet_based_installation(
+ installation_with_download_artifact = get_backend_installation(
download_artifact=MagicMock()
)
assert installation_with_download_artifact.supports(DownloadAndInstall()) is True
path_checker_mock = MagicMock(return_value=BackendInfo(tmp_path))
- installation_can_install_from_dir = get_aiet_based_installation(
+ installation_can_install_from_dir = get_backend_installation(
path_checker=path_checker_mock
)
assert installation_can_install_from_dir.supports(InstallFromPath(tmp_path)) is True
- any_installation = get_aiet_based_installation()
+ any_installation = get_backend_installation()
assert any_installation.supports("unknown_install_type") is False # type: ignore
-def test_aiet_based_installation_install_wrong_type() -> None:
+def test_backend_installation_install_wrong_type() -> None:
"""Test that operation should fail if wrong install type provided."""
with pytest.raises(Exception, match="Unable to install wrong_install_type"):
- aiet_runner_mock = MagicMock(spec=AIETRunner)
- installation = get_aiet_based_installation(aiet_runner_mock)
+ backend_runner_mock = MagicMock(spec=BackendRunner)
+ installation = get_backend_installation(backend_runner_mock)
installation.install("wrong_install_type") # type: ignore
-def test_aiet_based_installation_install_from_path(
+def test_backend_installation_install_from_path(
tmp_path: Path, test_mlia_resources: Path
) -> None:
"""Test installation from the path."""
@@ -164,9 +164,9 @@ def test_aiet_based_installation_install_from_path(
path_checker_mock = MagicMock(return_value=BackendInfo(dist_dir))
- aiet_runner_mock = MagicMock(spec=AIETRunner)
- installation = get_aiet_based_installation(
- aiet_runner_mock=aiet_runner_mock,
+ backend_runner_mock = MagicMock(spec=BackendRunner)
+ installation = get_backend_installation(
+ backend_runner_mock=backend_runner_mock,
path_checker=path_checker_mock,
apps_resources=[sample_app.name],
system_config="example_config.json",
@@ -175,12 +175,12 @@ def test_aiet_based_installation_install_from_path(
assert installation.supports(InstallFromPath(dist_dir)) is True
installation.install(InstallFromPath(dist_dir))
- aiet_runner_mock.install_system.assert_called_once()
- aiet_runner_mock.install_application.assert_called_once_with(sample_app)
+ backend_runner_mock.install_system.assert_called_once()
+ backend_runner_mock.install_application.assert_called_once_with(sample_app)
@pytest.mark.parametrize("copy_source", [True, False])
-def test_aiet_based_installation_install_from_static_path(
+def test_backend_installation_install_from_static_path(
tmp_path: Path, test_mlia_resources: Path, copy_source: bool
) -> None:
"""Test installation from the predefined path."""
@@ -204,7 +204,7 @@ def test_aiet_based_installation_install_from_static_path(
nested_file = predefined_location_dir / "nested_file.txt"
nested_file.touch()
- aiet_runner_mock = MagicMock(spec=AIETRunner)
+ backend_runner_mock = MagicMock(spec=BackendRunner)
def check_install_dir(install_dir: Path) -> None:
"""Check content of the install dir."""
@@ -220,10 +220,10 @@ def test_aiet_based_installation_install_from_static_path(
assert install_dir / "custom_config.json" in files
- aiet_runner_mock.install_system.side_effect = check_install_dir
+ backend_runner_mock.install_system.side_effect = check_install_dir
- installation = get_aiet_based_installation(
- aiet_runner_mock=aiet_runner_mock,
+ installation = get_backend_installation(
+ backend_runner_mock=backend_runner_mock,
path_checker=StaticPathChecker(
predefined_location,
["file.txt"],
@@ -237,8 +237,8 @@ def test_aiet_based_installation_install_from_static_path(
assert installation.supports(InstallFromPath(predefined_location)) is True
installation.install(InstallFromPath(predefined_location))
- aiet_runner_mock.install_system.assert_called_once()
- aiet_runner_mock.install_application.assert_called_once_with(sample_app)
+ backend_runner_mock.install_system.assert_called_once()
+ backend_runner_mock.install_application.assert_called_once_with(sample_app)
def create_sample_fvp_archive(tmp_path: Path) -> Path:
@@ -259,7 +259,7 @@ def create_sample_fvp_archive(tmp_path: Path) -> Path:
return fvp_archive
-def test_aiet_based_installation_download_and_install(
+def test_backend_installation_download_and_install(
test_mlia_resources: Path, tmp_path: Path
) -> None:
"""Test downloading and installation process."""
@@ -277,9 +277,9 @@ def test_aiet_based_installation_download_and_install(
"""Sample installer."""
return dist_dir
- aiet_runner_mock = MagicMock(spec=AIETRunner)
- installation = get_aiet_based_installation(
- aiet_runner_mock,
+ backend_runner_mock = MagicMock(spec=BackendRunner)
+ installation = get_backend_installation(
+ backend_runner_mock,
download_artifact=download_artifact_mock,
backend_installer=installer,
path_checker=path_checker,
@@ -288,7 +288,7 @@ def test_aiet_based_installation_download_and_install(
installation.install(DownloadAndInstall())
- aiet_runner_mock.install_system.assert_called_once()
+ backend_runner_mock.install_system.assert_called_once()
@pytest.mark.parametrize(