author     Benjamin Klimczak <benjamin.klimczak@arm.com>   2023-08-24 16:38:47 +0100
committer  Benjamin Klimczak <benjamin.klimczak@arm.com>   2023-09-05 14:20:08 +0100
commit     e5a0bc3ecd4d9c46ead3b8217584eaa916a3afa4 (patch)
tree       94c348fcef50326a755a049a2a4027f588211f8b
parent     900c3c52b681e0b8a4454e2e2cf29265d53a2c98 (diff)
download   mlia-e5a0bc3ecd4d9c46ead3b8217584eaa916a3afa4.tar.gz
MLIA-961 Update tox dependencies
- Update version dependencies in the tox.ini
- Fix linter issues

Change-Id: I04c3a841ee2646a865dab037701d66c28792f2a4
Signed-off-by: Benjamin Klimczak <benjamin.klimczak@arm.com>
-rw-r--r--  src/mlia/backend/corstone/install.py | 2
-rw-r--r--  src/mlia/backend/install.py | 8
-rw-r--r--  src/mlia/backend/repo.py | 12
-rw-r--r--  src/mlia/backend/vela/compiler.py | 8
-rw-r--r--  src/mlia/backend/vela/performance.py | 4
-rw-r--r--  src/mlia/cli/commands.py | 2
-rw-r--r--  src/mlia/core/advisor.py | 4
-rw-r--r--  src/mlia/core/common.py | 2
-rw-r--r--  src/mlia/core/events.py | 6
-rw-r--r--  src/mlia/core/mixins.py | 12
-rw-r--r--  src/mlia/core/reporting.py | 2
-rw-r--r--  src/mlia/nn/tensorflow/config.py | 6
-rw-r--r--  src/mlia/nn/tensorflow/optimizations/select.py | 4
-rw-r--r--  src/mlia/nn/tensorflow/utils.py | 6
-rw-r--r--  src/mlia/target/cortex_a/advisor.py | 4
-rw-r--r--  src/mlia/target/cortex_a/operators.py | 2
-rw-r--r--  src/mlia/target/cortex_a/reporters.py | 2
-rw-r--r--  src/mlia/target/ethos_u/advisor.py | 6
-rw-r--r--  src/mlia/target/ethos_u/data_collection.py | 2
-rw-r--r--  src/mlia/target/ethos_u/reporters.py | 2
-rw-r--r--  src/mlia/target/tosa/advisor.py | 6
-rw-r--r--  src/mlia/target/tosa/operators.py | 4
-rw-r--r--  src/mlia/target/tosa/reporters.py | 2
-rw-r--r--  src/mlia/utils/console.py | 2
-rw-r--r--  src/mlia/utils/logging.py | 2
-rw-r--r--  tests/test_cli_options.py | 70
-rw-r--r--  tests/test_nn_tensorflow_config.py | 12
-rw-r--r--  tests/test_nn_tensorflow_optimizations_select.py | 11
-rw-r--r--  tests/test_utils_console.py | 2
-rw-r--r--  tests/test_utils_filesystem.py | 4
-rw-r--r--  tests_e2e/test_e2e.py | 14
-rw-r--r--  tox.ini | 8
32 files changed, 121 insertions, 112 deletions
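
Most of this patch replaces bare raise Exception(...) calls with specific built-in exceptions (RuntimeError, ValueError, TypeError, KeyError, FileNotFoundError, NotImplementedError), keeps the "from err" chaining, and adds trailing full stops to messages; newer pylint releases report the old form as overly broad, which matches the "Fix linter issues" note above. Below is a minimal sketch of the recurring pattern, illustrative only and not part of the patch; the helper name install_fvp is hypothetical.

# Illustrative only -- not part of the patch. The same try/except shape is
# used in src/mlia/backend/corstone/install.py below.
import subprocess


def install_fvp(fvp_install_cmd: list[str]) -> None:
    """Run an FVP installer command (hypothetical helper for illustration)."""
    try:
        subprocess.check_call(fvp_install_cmd)  # nosec
    except subprocess.CalledProcessError as err:
        # Specific exception type instead of bare Exception, chained with
        # "from err" so the original cause is preserved in the traceback.
        raise RuntimeError(
            "Error occurred during Corstone-300 installation"
        ) from err
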
diff --git a/src/mlia/backend/corstone/install.py b/src/mlia/backend/corstone/install.py
index 35976cf..d6101cf 100644
--- a/src/mlia/backend/corstone/install.py
+++ b/src/mlia/backend/corstone/install.py
@@ -51,7 +51,7 @@ class Corstone300Installer:
# this instance
subprocess.check_call(fvp_install_cmd) # nosec
except subprocess.CalledProcessError as err:
- raise Exception(
+ raise RuntimeError(
"Error occurred during Corstone-300 installation"
) from err
diff --git a/src/mlia/backend/install.py b/src/mlia/backend/install.py
index 4745f19..721b660 100644
--- a/src/mlia/backend/install.py
+++ b/src/mlia/backend/install.py
@@ -145,7 +145,7 @@ class BackendInstallation(Installation):
assert backend_info is not None, "Unable to resolve backend path"
self._install_from(backend_info)
else:
- raise Exception(f"Unable to install {install_type}")
+ raise RuntimeError(f"Unable to install {install_type}.")
def _install_from(self, backend_info: BackendInfo) -> None:
"""Install backend from the directory."""
@@ -173,7 +173,7 @@ class BackendInstallation(Installation):
try:
downloaded_to = download_artifact.download_to(tmpdir)
except Exception as err:
- raise Exception("Unable to download backend artifact") from err
+ raise RuntimeError("Unable to download backend artifact.") from err
with working_directory(tmpdir / "dist", create_dir=True) as dist_dir:
with tarfile.open(downloaded_to) as archive:
@@ -184,7 +184,7 @@ class BackendInstallation(Installation):
backend_path = self.backend_installer(eula_agrement, dist_dir)
if self.path_checker(backend_path) is None:
- raise Exception("Downloaded artifact has invalid structure")
+ raise ValueError("Downloaded artifact has invalid structure.")
self.install(InstallFromPath(backend_path))
@@ -311,7 +311,7 @@ class PyPackageBackendInstallation(Installation):
def install(self, install_type: InstallationType) -> None:
"""Install the backend."""
if not self.supports(install_type):
- raise Exception(f"Unsupported installation type {install_type}")
+ raise ValueError(f"Unsupported installation type {install_type}.")
self.package_manager.install(self._packages_to_install)
diff --git a/src/mlia/backend/repo.py b/src/mlia/backend/repo.py
index 3dd2e57..b64a46a 100644
--- a/src/mlia/backend/repo.py
+++ b/src/mlia/backend/repo.py
@@ -109,7 +109,7 @@ class BackendRepository:
repo_backend_path = self._get_backend_path(backend_dir_name)
if repo_backend_path.exists():
- raise Exception(f"Unable to copy backend files for {backend_name}.")
+ raise RuntimeError(f"Unable to copy backend files for {backend_name}.")
copy_all(backend_path, dest=repo_backend_path)
@@ -126,7 +126,7 @@ class BackendRepository:
) -> None:
"""Add backend to repository."""
if self.is_backend_installed(backend_name):
- raise Exception(f"Backend {backend_name} already installed.")
+ raise RuntimeError(f"Backend {backend_name} already installed.")
settings = settings or {}
settings["backend_path"] = backend_path.absolute().as_posix()
@@ -138,7 +138,7 @@ class BackendRepository:
settings = self.config_file.get_backend_settings(backend_name)
if not settings:
- raise Exception(f"Backend {backend_name} is not installed.")
+ raise RuntimeError(f"Backend {backend_name} is not installed.")
if "backend_dir" in settings:
repo_backend_path = self._get_backend_path(settings["backend_dir"])
@@ -155,7 +155,7 @@ class BackendRepository:
settings = self.config_file.get_backend_settings(backend_name)
if not settings:
- raise Exception(f"Backend {backend_name} is not installed.")
+ raise RuntimeError(f"Backend {backend_name} is not installed.")
if backend_dir := settings.get("backend_dir", None):
return self._get_backend_path(backend_dir), settings
@@ -163,7 +163,7 @@ class BackendRepository:
if backend_path := settings.get("backend_path", None):
return Path(backend_path), settings
- raise Exception(f"Unable to resolve path of the backend {backend_name}.")
+ raise RuntimeError(f"Unable to resolve path of the backend {backend_name}.")
def _get_backend_path(self, backend_dir_name: str) -> Path:
"""Return path to backend."""
@@ -173,7 +173,7 @@ class BackendRepository:
"""Init repository."""
if self.repository.exists():
if not self.config_file.exists():
- raise Exception(
+ raise RuntimeError(
f"Directory {self.repository} could not be used as MLIA repository."
)
else:
diff --git a/src/mlia/backend/vela/compiler.py b/src/mlia/backend/vela/compiler.py
index afad05b..78f97b2 100644
--- a/src/mlia/backend/vela/compiler.py
+++ b/src/mlia/backend/vela/compiler.py
@@ -129,7 +129,7 @@ class VelaCompiler: # pylint: disable=too-many-instance-attributes
nng, network_type = model.nng, NetworkType.TFLite
if not nng:
- raise Exception("Unable to read model")
+ raise ValueError("Unable to read model: model.nng is not available")
output_basename = f"{self.output_dir}/{nng.name}"
@@ -152,7 +152,9 @@ class VelaCompiler: # pylint: disable=too-many-instance-attributes
return OptimizedModel(nng, arch, compiler_options, scheduler_options)
except (SystemExit, Exception) as err:
- raise Exception("Model could not be optimized with Vela compiler") from err
+ raise RuntimeError(
+ "Model could not be optimized with Vela compiler."
+ ) from err
def get_config(self) -> dict[str, Any]:
"""Get compiler configuration."""
@@ -200,7 +202,7 @@ class VelaCompiler: # pylint: disable=too-many-instance-attributes
):
return read_model(model_path, ModelReaderOptions()) # type: ignore
except (SystemExit, Exception) as err:
- raise Exception(f"Unable to read model {model_path}") from err
+ raise RuntimeError(f"Unable to read model {model_path}.") from err
def _architecture_features(self) -> ArchitectureFeatures:
"""Return ArchitectureFeatures instance."""
diff --git a/src/mlia/backend/vela/performance.py b/src/mlia/backend/vela/performance.py
index e545b85..a548b26 100644
--- a/src/mlia/backend/vela/performance.py
+++ b/src/mlia/backend/vela/performance.py
@@ -56,7 +56,9 @@ def estimate_performance(
initial_model = vela_compiler.read_model(model_path)
if initial_model.optimized:
- raise Exception("Unable to estimate performance for the given optimized model")
+ raise ValueError(
+ "Unable to estimate performance for the given optimized model."
+ )
optimized_model = vela_compiler.compile_model(initial_model)
diff --git a/src/mlia/cli/commands.py b/src/mlia/cli/commands.py
index f0ba519..1f339ee 100644
--- a/src/mlia/cli/commands.py
+++ b/src/mlia/cli/commands.py
@@ -72,7 +72,7 @@ def check(
"model.h5", compatibility=True, performance=True)
"""
if not model:
- raise Exception("Model is not provided")
+ raise ValueError("Model is not provided.")
# Set category based on checks to perform (i.e. "compatibility" and/or
# "performance").
diff --git a/src/mlia/core/advisor.py b/src/mlia/core/advisor.py
index d684241..7db5cfb 100644
--- a/src/mlia/core/advisor.py
+++ b/src/mlia/core/advisor.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Inference advisor module."""
from __future__ import annotations
@@ -77,7 +77,7 @@ class DefaultInferenceAdvisor(InferenceAdvisor, ParameterResolverMixin):
model = Path(model_param)
if not model.exists():
- raise Exception(f"Path {model} does not exist")
+ raise FileNotFoundError(f"Path {model} does not exist.")
return model
diff --git a/src/mlia/core/common.py b/src/mlia/core/common.py
index baaed50..e437a75 100644
--- a/src/mlia/core/common.py
+++ b/src/mlia/core/common.py
@@ -36,7 +36,7 @@ class AdviceCategory(Flag):
category_names = [item.name for item in AdviceCategory]
for advice_value in values:
if advice_value.upper() not in category_names:
- raise Exception(f"Invalid advice category {advice_value}")
+ raise ValueError(f"Invalid advice category {advice_value}.")
return {AdviceCategory[value.upper()] for value in values}
diff --git a/src/mlia/core/events.py b/src/mlia/core/events.py
index e328cc1..ae22771 100644
--- a/src/mlia/core/events.py
+++ b/src/mlia/core/events.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Module for the events and related functionality.
@@ -267,14 +267,14 @@ class EventDispatcherMetaclass(type):
"""
def __new__(
- cls,
+ mcs,
clsname: str,
bases: tuple[type, ...],
namespace: dict[str, Any],
event_handler_method_prefix: str = "on_",
) -> Any:
"""Create event dispatcher and link event handlers."""
- new_class = super().__new__(cls, clsname, bases, namespace)
+ new_class = super().__new__(mcs, clsname, bases, namespace)
@singledispatchmethod
def dispatcher(_self: Any, _event: Event) -> Any:
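
The cls -> mcs rename in EventDispatcherMetaclass.__new__ above follows the common naming convention for the first argument of a metaclass __new__, which recent pylint versions tend to enforce. A small self-contained sketch of the same convention, illustrative only; RegisteringMeta is a made-up example, not MLIA code.

# Illustrative only -- shows the "mcs" naming convention for the first
# argument of a metaclass __new__, as applied in events.py above.
from __future__ import annotations

from typing import Any


class RegisteringMeta(type):
    """Hypothetical metaclass that records every class created through it."""

    registry: list[type] = []

    def __new__(
        mcs,
        clsname: str,
        bases: tuple[type, ...],
        namespace: dict[str, Any],
    ) -> Any:
        new_class = super().__new__(mcs, clsname, bases, namespace)
        mcs.registry.append(new_class)
        return new_class


class Example(metaclass=RegisteringMeta):
    """Ends up in RegisteringMeta.registry when this module is imported."""


assert Example in RegisteringMeta.registry
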
diff --git a/src/mlia/core/mixins.py b/src/mlia/core/mixins.py
index 5ef9d66..e50e6f7 100644
--- a/src/mlia/core/mixins.py
+++ b/src/mlia/core/mixins.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Mixins module."""
from __future__ import annotations
@@ -35,21 +35,21 @@ class ParameterResolverMixin:
ctx = context or self.context
if ctx.config_parameters is None:
- raise Exception("Configuration parameters are not set")
+ raise ValueError("Configuration parameters are not set.")
section_params = ctx.config_parameters.get(section)
if section_params is None or not isinstance(section_params, dict):
- raise Exception(
+ raise ValueError(
f"Parameter section {section} has wrong format, "
- "expected to be a dictionary"
+ "expected to be a dictionary."
)
value = section_params.get(name)
if not value and expected:
- raise Exception(f"Parameter {name} is not set")
+ raise ValueError(f"Parameter {name} is not set.")
if value and expected_type is not None and not isinstance(value, expected_type):
- raise Exception(f"Parameter {name} expected to have type {expected_type}")
+ raise TypeError(f"Parameter {name} expected to have type {expected_type}.")
return value
diff --git a/src/mlia/core/reporting.py b/src/mlia/core/reporting.py
index 7b9ce5c..722adfd 100644
--- a/src/mlia/core/reporting.py
+++ b/src/mlia/core/reporting.py
@@ -427,7 +427,7 @@ class SingleRow(Table):
def to_plain_text(self, **kwargs: Any) -> str:
"""Produce report in human readable format."""
if len(self.rows) != 1:
- raise Exception("Table should have only one row")
+ raise RuntimeError(f"Table should have only one row, but has {self.rows}.")
items = "\n".join(
column.header.ljust(35) + str(item).rjust(25)
diff --git a/src/mlia/nn/tensorflow/config.py b/src/mlia/nn/tensorflow/config.py
index 0c3133a..d7d430f 100644
--- a/src/mlia/nn/tensorflow/config.py
+++ b/src/mlia/nn/tensorflow/config.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Model configuration."""
from __future__ import annotations
@@ -110,8 +110,8 @@ def get_model(model: str | Path) -> ModelConfiguration:
if is_saved_model(model):
return TfModel(model)
- raise Exception(
- "The input model format is not supported"
+ raise ValueError(
+ "The input model format is not supported "
"(supported formats: TensorFlow Lite, Keras, TensorFlow saved model)!"
)
diff --git a/src/mlia/nn/tensorflow/optimizations/select.py b/src/mlia/nn/tensorflow/optimizations/select.py
index d4a8ea4..a78df12 100644
--- a/src/mlia/nn/tensorflow/optimizations/select.py
+++ b/src/mlia/nn/tensorflow/optimizations/select.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Module for optimization selection."""
from __future__ import annotations
@@ -64,7 +64,7 @@ class OptimizationSettings(NamedTuple):
self.optimization_type, next_target, self.layers_to_optimize
)
- raise Exception(f"Unknown optimization type {self.optimization_type}")
+ raise ValueError(f"Optimization type {self.optimization_type} is unknown.")
class MultiStageOptimizer(Optimizer):
diff --git a/src/mlia/nn/tensorflow/utils.py b/src/mlia/nn/tensorflow/utils.py
index 287e6ff..d688a63 100644
--- a/src/mlia/nn/tensorflow/utils.py
+++ b/src/mlia/nn/tensorflow/utils.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-FileCopyrightText: Copyright The TensorFlow Authors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Collection of useful functions for optimizations."""
@@ -22,7 +22,7 @@ def representative_dataset(
) -> Callable:
"""Sample dataset used for quantization."""
if input_shape[0] != 1:
- raise Exception("Only the input batch_size=1 is supported!")
+ raise ValueError("Only the input batch_size=1 is supported!")
def dataset() -> Iterable:
for _ in range(sample_count):
@@ -41,7 +41,7 @@ def get_tf_tensor_shape(model: str) -> list:
default_signature = loaded.signatures[default_signature_key]
inputs_tensor_info = default_signature.inputs
except KeyError as err:
- raise Exception(f"Signature '{default_signature_key}' not found") from err
+ raise KeyError(f"Signature '{default_signature_key}' not found.") from err
return [
dim
diff --git a/src/mlia/target/cortex_a/advisor.py b/src/mlia/target/cortex_a/advisor.py
index 3c127ec..db07b96 100644
--- a/src/mlia/target/cortex_a/advisor.py
+++ b/src/mlia/target/cortex_a/advisor.py
@@ -45,12 +45,12 @@ class CortexAInferenceAdvisor(DefaultInferenceAdvisor):
collectors.append(CortexAOperatorCompatibility(model, target_config))
if context.category_enabled(AdviceCategory.PERFORMANCE):
- raise Exception(
+ raise RuntimeError(
"Performance estimation is currently not supported for Cortex-A."
)
if context.category_enabled(AdviceCategory.OPTIMIZATION):
- raise Exception(
+ raise RuntimeError(
"Model optimizations are currently not supported for Cortex-A."
)
diff --git a/src/mlia/target/cortex_a/operators.py b/src/mlia/target/cortex_a/operators.py
index cd92f31..4a8d992 100644
--- a/src/mlia/target/cortex_a/operators.py
+++ b/src/mlia/target/cortex_a/operators.py
@@ -143,7 +143,7 @@ def get_cortex_a_compatibility_info(
def report() -> None:
"""Generate supported operators report."""
- raise Exception(
+ raise NotImplementedError(
"Generating a supported operators report is not "
"currently supported with Cortex-A target profile."
)
diff --git a/src/mlia/target/cortex_a/reporters.py b/src/mlia/target/cortex_a/reporters.py
index e7247f5..7f4f21b 100644
--- a/src/mlia/target/cortex_a/reporters.py
+++ b/src/mlia/target/cortex_a/reporters.py
@@ -87,4 +87,4 @@ def cortex_a_formatters(data: Any) -> Callable[[Any], Report]:
if isinstance(data, CortexACompatibilityInfo):
return report_cortex_a_operators
- raise Exception(f"Unable to find appropriate formatter for {data}.")
+ raise RuntimeError(f"Unable to find appropriate formatter for {data}.")
diff --git a/src/mlia/target/ethos_u/advisor.py b/src/mlia/target/ethos_u/advisor.py
index 714d6a4..d2c308a 100644
--- a/src/mlia/target/ethos_u/advisor.py
+++ b/src/mlia/target/ethos_u/advisor.py
@@ -54,7 +54,7 @@ class EthosUInferenceAdvisor(DefaultInferenceAdvisor):
if is_tflite_model(model):
# TensorFlow Lite models do not support optimization (only performance)!
if context.category_enabled(AdviceCategory.OPTIMIZATION):
- raise Exception(
+ raise RuntimeError(
"Optimizations are not supported for TensorFlow Lite files."
)
if context.category_enabled(AdviceCategory.PERFORMANCE):
@@ -170,7 +170,7 @@ def _get_config_parameters(
backends = extra_args.get("backends")
if backends is not None:
if not is_list_of(backends, str):
- raise Exception("Backends value has wrong format")
+ raise ValueError("Backends value has wrong format.")
advisor_parameters["ethos_u_inference_advisor"]["backends"] = backends
@@ -179,7 +179,7 @@ def _get_config_parameters(
optimization_targets = _DEFAULT_OPTIMIZATION_TARGETS
if not is_list_of(optimization_targets, dict):
- raise Exception("Optimization targets value has wrong format")
+ raise ValueError("Optimization targets value has wrong format.")
advisor_parameters.update(
{
diff --git a/src/mlia/target/ethos_u/data_collection.py b/src/mlia/target/ethos_u/data_collection.py
index 8348393..0654143 100644
--- a/src/mlia/target/ethos_u/data_collection.py
+++ b/src/mlia/target/ethos_u/data_collection.py
@@ -178,7 +178,7 @@ class EthosUOptimizationPerformance(ContextAwareDataCollector):
) -> list[list[OptimizationSettings]]:
"""Parse optimization parameters."""
if not is_list_of(optimizations, list):
- raise Exception("Optimization parameters expected to be a list")
+ raise ValueError("Optimization parameters expected to be a list.")
return [
[
diff --git a/src/mlia/target/ethos_u/reporters.py b/src/mlia/target/ethos_u/reporters.py
index 00e68b5..711f036 100644
--- a/src/mlia/target/ethos_u/reporters.py
+++ b/src/mlia/target/ethos_u/reporters.py
@@ -390,6 +390,6 @@ def ethos_u_formatters(data: Any) -> Callable[[Any], Report]:
report = report_tflite_compatiblity
else:
- raise Exception(f"Unable to find appropriate formatter for {data}")
+ raise RuntimeError(f"Unable to find appropriate formatter for {data}.")
return report
diff --git a/src/mlia/target/tosa/advisor.py b/src/mlia/target/tosa/advisor.py
index 7859eca..2d5163e 100644
--- a/src/mlia/target/tosa/advisor.py
+++ b/src/mlia/target/tosa/advisor.py
@@ -44,12 +44,14 @@ class TOSAInferenceAdvisor(DefaultInferenceAdvisor):
collectors.append(TOSAOperatorCompatibility(model))
if context.category_enabled(AdviceCategory.PERFORMANCE):
- raise Exception(
+ raise RuntimeError(
"Performance estimation is currently not supported for TOSA."
)
if context.category_enabled(AdviceCategory.OPTIMIZATION):
- raise Exception("Model optimizations are currently not supported for TOSA.")
+ raise RuntimeError(
+ "Model optimizations are currently not supported for TOSA."
+ )
return collectors
diff --git a/src/mlia/target/tosa/operators.py b/src/mlia/target/tosa/operators.py
index b75ceb0..62f2e76 100644
--- a/src/mlia/target/tosa/operators.py
+++ b/src/mlia/target/tosa/operators.py
@@ -1,11 +1,11 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Operators module."""
def report() -> None:
"""Generate supported operators report."""
- raise Exception(
+ raise NotImplementedError(
"Generating a supported operators report is not "
"currently supported with TOSA target profile."
)
diff --git a/src/mlia/target/tosa/reporters.py b/src/mlia/target/tosa/reporters.py
index e10f047..7b91e94 100644
--- a/src/mlia/target/tosa/reporters.py
+++ b/src/mlia/target/tosa/reporters.py
@@ -168,4 +168,4 @@ def tosa_formatters(data: Any) -> Callable[[Any], Report]:
if isinstance(data, TFLiteCompatibilityInfo):
return report_tflite_compatiblity
- raise Exception(f"Unable to find appropriate formatter for {data}")
+ raise RuntimeError(f"Unable to find appropriate formatter for {data}.")
diff --git a/src/mlia/utils/console.py b/src/mlia/utils/console.py
index 57e3ba2..1f432fb 100644
--- a/src/mlia/utils/console.py
+++ b/src/mlia/utils/console.py
@@ -77,7 +77,7 @@ def _get_table(table_style: str) -> Table:
if table_style == "no_borders":
return Table(show_header=False, box=None)
- raise Exception(f"Unsupported table style {table_style}")
+ raise ValueError(f"Table style {table_style} is not supported.")
def _convert_to_text(*renderables: RenderableType) -> str:
diff --git a/src/mlia/utils/logging.py b/src/mlia/utils/logging.py
index 17f6cae..07b16df 100644
--- a/src/mlia/utils/logging.py
+++ b/src/mlia/utils/logging.py
@@ -181,7 +181,7 @@ def create_log_handler(
handler = logging.StreamHandler(stream)
if handler is None:
- raise Exception("Unable to create logging handler")
+ raise RuntimeError("Unable to create logging handler.")
if log_level:
handler.setLevel(log_level)
diff --git a/tests/test_cli_options.py b/tests/test_cli_options.py
index 94c3111..c02ef89 100644
--- a/tests/test_cli_options.py
+++ b/tests/test_cli_options.py
@@ -26,11 +26,11 @@ from mlia.core.typing import OutputFormat
None,
does_not_raise(),
[
- dict(
- optimization_type="pruning",
- optimization_target=0.5,
- layers_to_optimize=None,
- )
+ {
+ "optimization_type": "pruning",
+ "optimization_target": 0.5,
+ "layers_to_optimize": None,
+ }
],
],
[
@@ -40,11 +40,11 @@ from mlia.core.typing import OutputFormat
None,
does_not_raise(),
[
- dict(
- optimization_type="pruning",
- optimization_target=0.5,
- layers_to_optimize=None,
- )
+ {
+ "optimization_type": "pruning",
+ "optimization_target": 0.5,
+ "layers_to_optimize": None,
+ }
],
],
[
@@ -54,11 +54,11 @@ from mlia.core.typing import OutputFormat
None,
does_not_raise(),
[
- dict(
- optimization_type="clustering",
- optimization_target=32,
- layers_to_optimize=None,
- )
+ {
+ "optimization_type": "clustering",
+ "optimization_target": 32,
+ "layers_to_optimize": None,
+ }
],
],
[
@@ -68,16 +68,16 @@ from mlia.core.typing import OutputFormat
None,
does_not_raise(),
[
- dict(
- optimization_type="pruning",
- optimization_target=0.5,
- layers_to_optimize=None,
- ),
- dict(
- optimization_type="clustering",
- optimization_target=32,
- layers_to_optimize=None,
- ),
+ {
+ "optimization_type": "pruning",
+ "optimization_target": 0.5,
+ "layers_to_optimize": None,
+ },
+ {
+ "optimization_type": "clustering",
+ "optimization_target": 32,
+ "layers_to_optimize": None,
+ },
],
],
[
@@ -87,11 +87,11 @@ from mlia.core.typing import OutputFormat
None,
does_not_raise(),
[
- dict(
- optimization_type="pruning",
- optimization_target=0.4,
- layers_to_optimize=None,
- )
+ {
+ "optimization_type": "pruning",
+ "optimization_target": 0.4,
+ "layers_to_optimize": None,
+ }
],
],
[
@@ -113,11 +113,11 @@ from mlia.core.typing import OutputFormat
32.2,
does_not_raise(),
[
- dict(
- optimization_type="clustering",
- optimization_target=32.2,
- layers_to_optimize=None,
- )
+ {
+ "optimization_type": "clustering",
+ "optimization_target": 32.2,
+ "layers_to_optimize": None,
+ }
],
],
],
diff --git a/tests/test_nn_tensorflow_config.py b/tests/test_nn_tensorflow_config.py
index 1a6fbe3..656619d 100644
--- a/tests/test_nn_tensorflow_config.py
+++ b/tests/test_nn_tensorflow_config.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Tests for config module."""
from contextlib import ExitStack as does_not_raise
@@ -45,10 +45,12 @@ def test_convert_tf_to_tflite(tmp_path: Path, test_tf_model: Path) -> None:
"test.model",
None,
pytest.raises(
- Exception,
- match="The input model format is not supported"
- r"\(supported formats: TensorFlow Lite, Keras, "
- r"TensorFlow saved model\)!",
+ ValueError,
+ match=(
+ "The input model format is not supported "
+ r"\(supported formats: TensorFlow Lite, Keras, "
+ r"TensorFlow saved model\)!"
+ ),
),
),
],
diff --git a/tests/test_nn_tensorflow_optimizations_select.py b/tests/test_nn_tensorflow_optimizations_select.py
index e22a9d8..f5ba6f0 100644
--- a/tests/test_nn_tensorflow_optimizations_select.py
+++ b/tests/test_nn_tensorflow_optimizations_select.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Tests for module select."""
from __future__ import annotations
@@ -10,6 +10,7 @@ from typing import Any
import pytest
import tensorflow as tf
+from mlia.core.errors import ConfigurationError
from mlia.nn.tensorflow.optimizations.clustering import Clusterer
from mlia.nn.tensorflow.optimizations.clustering import ClusteringConfiguration
from mlia.nn.tensorflow.optimizations.pruning import Pruner
@@ -55,7 +56,7 @@ from mlia.nn.tensorflow.optimizations.select import OptimizationSettings
layers_to_optimize=None,
),
pytest.raises(
- Exception,
+ ConfigurationError,
match="Optimization target should be a "
"positive integer. "
"Optimization target provided: 0.5",
@@ -76,7 +77,7 @@ from mlia.nn.tensorflow.optimizations.select import OptimizationSettings
layers_to_optimize="all", # type: ignore
),
pytest.raises(
- Exception,
+ ConfigurationError,
match="Unsupported optimization type: superoptimization",
),
None,
@@ -89,7 +90,7 @@ from mlia.nn.tensorflow.optimizations.select import OptimizationSettings
layers_to_optimize=None,
),
pytest.raises(
- Exception,
+ ConfigurationError,
match="Optimization type is not provided",
),
None,
@@ -225,7 +226,7 @@ def test_optimization_settings_create_from(
OptimizationSettings("super_optimization", 42, None),
None,
pytest.raises(
- Exception, match="Unknown optimization type super_optimization"
+ Exception, match="Optimization type super_optimization is unknown."
),
],
],
diff --git a/tests/test_utils_console.py b/tests/test_utils_console.py
index 0a537a6..59bc6f4 100644
--- a/tests/test_utils_console.py
+++ b/tests/test_utils_console.py
@@ -53,7 +53,7 @@ def test_produce_table(
def test_produce_table_unknown_style() -> None:
"""Test that function should fail if unknown style provided."""
- with pytest.raises(Exception, match="Unsupported table style unknown_style"):
+ with pytest.raises(ValueError, match="Table style unknown_style is not supported."):
produce_table([["1", "2", "3"]], [], "unknown_style")
diff --git a/tests/test_utils_filesystem.py b/tests/test_utils_filesystem.py
index 654f5c8..c1c9876 100644
--- a/tests/test_utils_filesystem.py
+++ b/tests/test_utils_filesystem.py
@@ -40,12 +40,12 @@ def test_get_mlia_target_profiles() -> None:
@pytest.mark.parametrize("raise_exception", [True, False])
def test_temp_file(raise_exception: bool) -> None:
"""Test temp_file context manager."""
- with contextlib.suppress(Exception):
+ with contextlib.suppress(RuntimeError):
with temp_file() as tmp_path:
assert tmp_path.is_file()
if raise_exception:
- raise Exception("Error!")
+ raise RuntimeError("Error!")
assert not tmp_path.exists()
diff --git a/tests_e2e/test_e2e.py b/tests_e2e/test_e2e.py
index c164901..2d8c375 100644
--- a/tests_e2e/test_e2e.py
+++ b/tests_e2e/test_e2e.py
@@ -44,13 +44,13 @@ class ExecutionConfiguration:
def from_dict(cls, exec_info: dict) -> ExecutionConfiguration:
"""Create instance from the dictionary."""
if not (command := exec_info.get("command")):
- raise Exception("Command is not defined")
+ raise ValueError("Command is not defined.")
if command not in VALID_COMMANDS:
- raise Exception(f"Unknown command {command}")
+ raise ValueError(f"Command {command} is unknown.")
if not (params := exec_info.get("parameters")):
- raise Exception(f"Command {command} should have parameters")
+ raise ValueError(f"Command {command} should have parameters.")
assert isinstance(params, dict), "Parameters should be a dictionary"
assert all(
@@ -101,13 +101,13 @@ def launch_and_wait(
if print_output:
print(output)
else:
- raise Exception("Unable to get process output")
+ raise RuntimeError("Unable to get process output. stdout is unavailable.")
# Wait for the process to terminate
process.wait()
if (exit_code := process.poll()) != 0:
- raise Exception(f"Command failed with exit_code {exit_code}")
+ raise RuntimeError(f"Command failed with exit_code {exit_code}.")
def run_command(
@@ -142,11 +142,11 @@ def get_config_file() -> Path:
"""Get path to the configuration file."""
env_var_name = "MLIA_E2E_CONFIG_FILE"
if not (config_file_env_var := os.environ.get(env_var_name)):
- raise Exception(f"Config file env variable ({env_var_name}) is not set")
+ raise ValueError(f"Config file env variable ({env_var_name}) is not set.")
config_file = Path(config_file_env_var)
if not config_file.is_file():
- raise Exception(f"Invalid config file {config_file_env_var}")
+ raise FileNotFoundError(f"Invalid config file {config_file_env_var}.")
return config_file
diff --git a/tox.ini b/tox.ini
index 9aedfbe..1aa27bc 100644
--- a/tox.ini
+++ b/tox.ini
@@ -7,7 +7,7 @@ isolated_build = true
[testenv:test]
description = Run the unit tests.
deps =
- pytest==7.2.0
+ pytest==7.4.0
commands =
pytest {posargs:tests/}
@@ -35,7 +35,7 @@ commands =
description = Run the code coverage of the unit tests.
deps =
{[testenv:test]deps}
- pytest-cov==4.0.0
+ pytest-cov==4.1.0
commands =
pytest --cov=mlia --cov-report term-missing --cov-fail-under=95 tests/
@@ -45,8 +45,8 @@ description = Run and setup the pre-commit hooks.
envdir={toxworkdir}/lint
deps =
{[testenv:test]deps}
- mypy==1.2.0
- pylint==2.15.5
+ mypy==1.5.1
+ pylint==2.17.5
pre-commit
# Pass the following environment variables:
# - HOME: Workaround for an issue with markdownlint in a docker environment