author     Dmitrii Agibov <dmitrii.agibov@arm.com>  2022-10-04 15:18:55 +0100
committer  Dmitrii Agibov <dmitrii.agibov@arm.com>  2022-10-04 15:34:38 +0100
commit     366ac7437383d3e9a99a971e61afbc5c466bbfcc (patch)
tree       8fca425ad9f03817391bdc1ca9c6dfdc9dc44489
parent     2d6ec94a2a085e7fa4fc23c4c7dcda3206eec98c (diff)
download   mlia-366ac7437383d3e9a99a971e61afbc5c466bbfcc.tar.gz
MLIA-673 Use inclusive language in the source code and filenames
- Update configuration for inclusive language linter
- Fix reported issues

Change-Id: If0f8b6e20c17d8ee1c6179c61040fc351437f036
-rw-r--r--  setup.cfg                                                  2
-rw-r--r--  src/mlia/cli/options.py                                    8
-rw-r--r--  tests/conftest.py                                         12
-rw-r--r--  tests/test_backend_common.py                               4
-rw-r--r--  tests/test_backend_system.py                              15
-rw-r--r--  tests/test_cli_commands.py                                16
-rw-r--r--  tests/test_core_advice_generation.py                       6
-rw-r--r--  tests/test_core_mixins.py                                 18
-rw-r--r--  tests/test_devices_ethosu_data_collection.py              16
-rw-r--r--  tests/test_nn_tensorflow_tflite_metrics.py                 8
-rw-r--r--  tests/test_resources/backends/applications/readme.txt      2
-rw-r--r--  tests/test_resources/backends/systems/system1/system_artifact/empty.txt (renamed from tests/test_resources/backends/systems/system1/system_artifact/dummy.txt)   0
-rw-r--r--  tests/test_resources/hello_world.json                     14

13 files changed, 53 insertions(+), 68 deletions(-)
diff --git a/setup.cfg b/setup.cfg
index 128574c..fc23064 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -73,7 +73,7 @@ select = B,C,E,F,W,T4
max_issue_threshold=1
# Blocklist: Words to lint in any context, with possibly special characters
# between, case insensitive
-blocklist=master,slave,blacklist,whitelist
+blocklist=master,slave,blacklist,whitelist,dummy
# Word list: Words to lint as whole words, with possibly special characters
# between, case insensitive
wordlist=he,she,him,her,his,hers
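
Note: the blocklist entries above are matched in any context, case-insensitively, and with possibly special characters between the letters, as the setup.cfg comments describe. A minimal sketch of how such a check could work, assuming a hypothetical lint_file helper rather than the actual linter plugin configured here:

    from __future__ import annotations

    import re
    from pathlib import Path

    BLOCKLIST = ["master", "slave", "blacklist", "whitelist", "dummy"]

    def build_pattern(word: str) -> re.Pattern:
        # Allow any non-alphanumeric characters between the letters,
        # so e.g. "d_u_m_m_y" is still flagged. Case-insensitive.
        return re.compile(r"[\W_]*".join(re.escape(char) for char in word), re.IGNORECASE)

    def lint_file(path: Path) -> list[str]:
        """Return the blocklisted words found in the given file."""
        text = path.read_text(errors="ignore")
        return [word for word in BLOCKLIST if build_pattern(word).search(text)]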
diff --git a/src/mlia/cli/options.py b/src/mlia/cli/options.py
index 3f0dc1f..f7f95c0 100644
--- a/src/mlia/cli/options.py
+++ b/src/mlia/cli/options.py
@@ -254,13 +254,13 @@ def get_target_profile_opts(device_args: dict | None) -> list[str]:
if not device_args:
return []
- dummy_parser = argparse.ArgumentParser()
- add_target_options(dummy_parser)
- args = dummy_parser.parse_args([])
+ parser = argparse.ArgumentParser()
+ add_target_options(parser)
+ args = parser.parse_args([])
params_name = {
action.dest: param_name
- for param_name, action in dummy_parser._option_string_actions.items() # pylint: disable=protected-access
+ for param_name, action in parser._option_string_actions.items() # pylint: disable=protected-access
}
non_default = [
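
Note: the change above keeps the existing trick of building a throwaway ArgumentParser only to recover the mapping from each argument's dest to its option string via argparse's private _option_string_actions registry; the rename just drops the "dummy_" prefix. A self-contained sketch of that pattern, with a stand-in add_target_options (the real options added by mlia.cli.options are not reproduced here):

    import argparse

    def add_target_options(parser: argparse.ArgumentParser) -> None:
        # Stand-in for the real add_target_options; these options are illustrative only.
        parser.add_argument("--target-profile", default="ethos-u55-256")
        parser.add_argument("--mac", type=int, default=256)

    parser = argparse.ArgumentParser()
    add_target_options(parser)

    # Map each argument's dest (e.g. "target_profile") back to the option string
    # it was registered under (e.g. "--target-profile").
    params_name = {
        action.dest: param_name
        for param_name, action in parser._option_string_actions.items()  # pylint: disable=protected-access
    }
    print(params_name)  # roughly {'help': '--help', 'target_profile': '--target-profile', 'mac': '--mac'}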
diff --git a/tests/conftest.py b/tests/conftest.py
index 4d12033..1cb3dcd 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -24,9 +24,9 @@ def fixture_test_resources_path() -> Path:
return Path(__file__).parent / "test_resources"
-@pytest.fixture(name="dummy_context")
-def fixture_dummy_context(tmpdir: str) -> ExecutionContext:
- """Return dummy context fixture."""
+@pytest.fixture(name="sample_context")
+def fixture_sample_context(tmpdir: str) -> ExecutionContext:
+ """Return sample context fixture."""
return ExecutionContext(working_dir=tmpdir)
@@ -44,19 +44,19 @@ def test_applications_path(test_resources_path: Path) -> Path:
@pytest.fixture(scope="session")
def non_optimised_input_model_file(test_tflite_model: Path) -> Path:
- """Provide the path to a quantized dummy model file."""
+ """Provide the path to a quantized test model file."""
return test_tflite_model
@pytest.fixture(scope="session")
def optimised_input_model_file(test_tflite_vela_model: Path) -> Path:
- """Provide path to Vela-optimised dummy model file."""
+ """Provide path to Vela-optimised test model file."""
return test_tflite_vela_model
@pytest.fixture(scope="session")
def invalid_input_model_file(test_tflite_invalid_model: Path) -> Path:
- """Provide the path to an invalid dummy model file."""
+ """Provide the path to an invalid test model file."""
return test_tflite_invalid_model
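
Note: the fixtures above rely on pytest's name= argument, so tests request the fixture as sample_context even though the function is called fixture_sample_context. A minimal sketch of that pattern, with a placeholder ExecutionContext standing in for the MLIA class:

    from pathlib import Path

    import pytest

    class ExecutionContext:
        """Placeholder standing in for MLIA's ExecutionContext."""

        def __init__(self, working_dir: str) -> None:
            self.working_dir = working_dir

    @pytest.fixture(name="sample_context")
    def fixture_sample_context(tmpdir: str) -> ExecutionContext:
        """Return sample context fixture."""
        return ExecutionContext(working_dir=tmpdir)

    def test_uses_sample_context(sample_context: ExecutionContext) -> None:
        """Tests request the fixture by its registered name, not the function name."""
        assert Path(str(sample_context.working_dir)).is_dir()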
diff --git a/tests/test_backend_common.py b/tests/test_backend_common.py
index d11261e..40a0cff 100644
--- a/tests/test_backend_common.py
+++ b/tests/test_backend_common.py
@@ -112,7 +112,7 @@ class TestBackend:
@pytest.mark.parametrize(
"parameter, valid",
[
- ("--choice-param dummy_value_1", True),
+ ("--choice-param value_1", True),
("--choice-param wrong_value", False),
("--open-param something", True),
("--wrong-param value", False),
@@ -313,7 +313,7 @@ class TestBackend:
self, param_name: str, user_param: str, expected_value: str
) -> None:
"""Test different variants to provide user parameters."""
- # A dummy config providing one backend config
+ # A sample config providing one backend config
config = {
"name": "test_backend",
"commands": {
diff --git a/tests/test_backend_system.py b/tests/test_backend_system.py
index 7a8b1de..ecc149d 100644
--- a/tests/test_backend_system.py
+++ b/tests/test_backend_system.py
@@ -6,7 +6,6 @@ from __future__ import annotations
from contextlib import ExitStack as does_not_raise
from pathlib import Path
from typing import Any
-from typing import Callable
from unittest.mock import MagicMock
import pytest
@@ -24,20 +23,6 @@ from mlia.backend.system import remove_system
from mlia.backend.system import System
-def dummy_resolver(
- values: dict[str, str] | None = None
-) -> Callable[[str, str, list[tuple[str | None, Param]]], str]:
- """Return dummy parameter resolver implementation."""
- # pylint: disable=unused-argument
- def resolver(
- param: str, cmd: str, param_values: list[tuple[str | None, Param]]
- ) -> str:
- """Implement dummy parameter resolver."""
- return values.get(param, "") if values else ""
-
- return resolver
-
-
def test_get_available_systems() -> None:
"""Test get_available_systems mocking get_resources."""
available_systems = get_available_systems()
diff --git a/tests/test_cli_commands.py b/tests/test_cli_commands.py
index eaa08e6..fd9e29c 100644
--- a/tests/test_cli_commands.py
+++ b/tests/test_cli_commands.py
@@ -22,19 +22,19 @@ from mlia.devices.ethosu.performance import PerformanceMetrics
from mlia.tools.metadata.common import InstallationManager
-def test_operators_expected_parameters(dummy_context: ExecutionContext) -> None:
+def test_operators_expected_parameters(sample_context: ExecutionContext) -> None:
"""Test operators command wrong parameters."""
with pytest.raises(Exception, match="Model is not provided"):
- operators(dummy_context, "ethos-u55-256")
+ operators(sample_context, "ethos-u55-256")
def test_performance_unknown_target(
- dummy_context: ExecutionContext, test_tflite_model: Path
+ sample_context: ExecutionContext, test_tflite_model: Path
) -> None:
"""Test that command should fail if unknown target passed."""
with pytest.raises(Exception, match="Unable to find target profile unknown"):
performance(
- dummy_context, model=str(test_tflite_model), target_profile="unknown"
+ sample_context, model=str(test_tflite_model), target_profile="unknown"
)
@@ -74,7 +74,7 @@ def test_performance_unknown_target(
],
)
def test_opt_expected_parameters(
- dummy_context: ExecutionContext,
+ sample_context: ExecutionContext,
target_profile: str,
monkeypatch: pytest.MonkeyPatch,
optimization_type: str,
@@ -87,7 +87,7 @@ def test_opt_expected_parameters(
with expected_error:
optimization(
- ctx=dummy_context,
+ ctx=sample_context,
target_profile=target_profile,
model=str(test_keras_model),
optimization_type=optimization_type,
@@ -105,7 +105,7 @@ def test_opt_expected_parameters(
)
def test_opt_valid_optimization_target(
target_profile: str,
- dummy_context: ExecutionContext,
+ sample_context: ExecutionContext,
optimization_type: str,
optimization_target: str,
monkeypatch: pytest.MonkeyPatch,
@@ -115,7 +115,7 @@ def test_opt_valid_optimization_target(
mock_performance_estimation(monkeypatch)
optimization(
- ctx=dummy_context,
+ ctx=sample_context,
target_profile=target_profile,
model=str(test_keras_model),
optimization_type=optimization_type,
diff --git a/tests/test_core_advice_generation.py b/tests/test_core_advice_generation.py
index f5e2960..3d985eb 100644
--- a/tests/test_core_advice_generation.py
+++ b/tests/test_core_advice_generation.py
@@ -47,7 +47,7 @@ def test_advice_generation() -> None:
def test_advice_category_decorator(
category: AdviceCategory,
expected_advice: list[Advice],
- dummy_context: Context,
+ sample_context: Context,
) -> None:
"""Test for advice_category decorator."""
@@ -60,10 +60,10 @@ def test_advice_category_decorator(
self.add_advice(["Good advice!"])
producer = SampleAdviceProducer()
- dummy_context.update(
+ sample_context.update(
advice_category=category, event_handlers=[], config_parameters={}
)
- producer.set_context(dummy_context)
+ producer.set_context(sample_context)
producer.produce_advice("some_data")
advice = producer.get_advice()
diff --git a/tests/test_core_mixins.py b/tests/test_core_mixins.py
index d66213d..3834fb3 100644
--- a/tests/test_core_mixins.py
+++ b/tests/test_core_mixins.py
@@ -10,22 +10,22 @@ from mlia.core.mixins import ContextMixin
from mlia.core.mixins import ParameterResolverMixin
-def test_context_mixin(dummy_context: Context) -> None:
+def test_context_mixin(sample_context: Context) -> None:
"""Test ContextMixin."""
class SampleClass(ContextMixin):
"""Sample class."""
sample_object = SampleClass()
- sample_object.set_context(dummy_context)
- assert sample_object.context == dummy_context
+ sample_object.set_context(sample_context)
+ assert sample_object.context == sample_context
class TestParameterResolverMixin:
"""Tests for parameter resolver mixin."""
@staticmethod
- def test_parameter_resolver_mixin(dummy_context: ExecutionContext) -> None:
+ def test_parameter_resolver_mixin(sample_context: ExecutionContext) -> None:
"""Test ParameterResolverMixin."""
class SampleClass(ParameterResolverMixin):
@@ -33,7 +33,7 @@ class TestParameterResolverMixin:
def __init__(self) -> None:
"""Init sample object."""
- self.context = dummy_context
+ self.context = sample_context
self.context.update(
advice_category=AdviceCategory.OPERATORS,
@@ -55,7 +55,7 @@ class TestParameterResolverMixin:
@staticmethod
def test_parameter_resolver_mixin_no_config(
- dummy_context: ExecutionContext,
+ sample_context: ExecutionContext,
) -> None:
"""Test ParameterResolverMixin without config params."""
@@ -64,7 +64,7 @@ class TestParameterResolverMixin:
def __init__(self) -> None:
"""Init sample object."""
- self.context = dummy_context
+ self.context = sample_context
with pytest.raises(Exception, match="Configuration parameters are not set"):
sample_object_no_config = SampleClassNoConfig()
@@ -72,7 +72,7 @@ class TestParameterResolverMixin:
@staticmethod
def test_parameter_resolver_mixin_bad_section(
- dummy_context: ExecutionContext,
+ sample_context: ExecutionContext,
) -> None:
"""Test ParameterResolverMixin without config params."""
@@ -81,7 +81,7 @@ class TestParameterResolverMixin:
def __init__(self) -> None:
"""Init sample object."""
- self.context = dummy_context
+ self.context = sample_context
self.context.update(
advice_category=AdviceCategory.OPERATORS,
event_handlers=[],
diff --git a/tests/test_devices_ethosu_data_collection.py b/tests/test_devices_ethosu_data_collection.py
index 897cf41..a4f37aa 100644
--- a/tests/test_devices_ethosu_data_collection.py
+++ b/tests/test_devices_ethosu_data_collection.py
@@ -47,20 +47,20 @@ def test_collectors_metadata(
def test_operator_compatibility_collector(
- dummy_context: Context, test_tflite_model: Path
+ sample_context: Context, test_tflite_model: Path
) -> None:
"""Test operator compatibility data collector."""
device = EthosUConfiguration("ethos-u55-256")
collector = EthosUOperatorCompatibility(test_tflite_model, device)
- collector.set_context(dummy_context)
+ collector.set_context(sample_context)
result = collector.collect_data()
assert isinstance(result, Operators)
def test_performance_collector(
- monkeypatch: pytest.MonkeyPatch, dummy_context: Context, test_tflite_model: Path
+ monkeypatch: pytest.MonkeyPatch, sample_context: Context, test_tflite_model: Path
) -> None:
"""Test performance data collector."""
device = EthosUConfiguration("ethos-u55-256")
@@ -68,7 +68,7 @@ def test_performance_collector(
mock_performance_estimation(monkeypatch, device)
collector = EthosUPerformance(test_tflite_model, device)
- collector.set_context(dummy_context)
+ collector.set_context(sample_context)
result = collector.collect_data()
assert isinstance(result, PerformanceMetrics)
@@ -76,7 +76,7 @@ def test_performance_collector(
def test_optimization_performance_collector(
monkeypatch: pytest.MonkeyPatch,
- dummy_context: Context,
+ sample_context: Context,
test_keras_model: Path,
test_tflite_model: Path,
) -> None:
@@ -93,7 +93,7 @@ def test_optimization_performance_collector(
]
],
)
- collector.set_context(dummy_context)
+ collector.set_context(sample_context)
result = collector.collect_data()
assert isinstance(result, OptimizationPerformanceMetrics)
@@ -122,7 +122,7 @@ def test_optimization_performance_collector(
]
],
)
- collector_tflite.set_context(dummy_context)
+ collector_tflite.set_context(sample_context)
with pytest.raises(FunctionalityNotSupportedError):
collector_tflite.collect_data()
@@ -132,7 +132,7 @@ def test_optimization_performance_collector(
collector_bad_config = EthosUOptimizationPerformance(
test_keras_model, device, {"optimization_type": "pruning"} # type: ignore
)
- collector.set_context(dummy_context)
+ collector.set_context(sample_context)
collector_bad_config.collect_data()
diff --git a/tests/test_nn_tensorflow_tflite_metrics.py b/tests/test_nn_tensorflow_tflite_metrics.py
index a5e7736..ca4ab55 100644
--- a/tests/test_nn_tensorflow_tflite_metrics.py
+++ b/tests/test_nn_tensorflow_tflite_metrics.py
@@ -17,8 +17,8 @@ from mlia.nn.tensorflow.tflite_metrics import ReportClusterMode
from mlia.nn.tensorflow.tflite_metrics import TFLiteMetrics
-def _dummy_keras_model() -> tf.keras.Model:
- # Create a dummy model
+def _sample_keras_model() -> tf.keras.Model:
+ # Create a sample model
keras_model = tf.keras.Sequential(
[
tf.keras.Input(shape=(8, 8, 3)),
@@ -40,7 +40,7 @@ def _sparse_binary_keras_model() -> tf.keras.Model:
value[...] = 1.0
return weights
- keras_model = _dummy_keras_model()
+ keras_model = _sample_keras_model()
# Assign weights to have 0.5 sparsity
for layer in keras_model.layers:
if not isinstance(layer, tf.keras.layers.Flatten):
@@ -74,7 +74,7 @@ class TestTFLiteMetrics:
@staticmethod
def test_sparsity(metrics: TFLiteMetrics) -> None:
"""Test sparsity."""
- # Create new instance with a dummy TFLite file
+ # Create new instance with a sample TFLite file
# Check sparsity calculation
sparsity_per_layer = metrics.sparsity_per_layer()
for name, sparsity in sparsity_per_layer.items():
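
Note: the sparsity assertions above depend on per-layer sparsity, i.e. the fraction of zero-valued weights in each layer (the sparse test model is built to hit 0.5). A small illustrative calculation with NumPy, not the TFLiteMetrics implementation itself:

    import numpy as np

    def sparsity(weights: np.ndarray) -> float:
        """Return the fraction of zero-valued elements in a weight tensor."""
        return float(np.count_nonzero(weights == 0)) / weights.size

    # A 2x2 kernel with half of its values zeroed out has sparsity 0.5.
    kernel = np.array([[1.0, 0.0], [0.0, 1.0]])
    assert sparsity(kernel) == 0.5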
diff --git a/tests/test_resources/backends/applications/readme.txt b/tests/test_resources/backends/applications/readme.txt
index a1f8209..d3e6fe2 100644
--- a/tests/test_resources/backends/applications/readme.txt
+++ b/tests/test_resources/backends/applications/readme.txt
@@ -1,4 +1,4 @@
SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
SPDX-License-Identifier: Apache-2.0
-Dummy file for test purposes
+File for test purposes
diff --git a/tests/test_resources/backends/systems/system1/system_artifact/dummy.txt b/tests/test_resources/backends/systems/system1/system_artifact/empty.txt
index 487e9d8..487e9d8 100644
--- a/tests/test_resources/backends/systems/system1/system_artifact/dummy.txt
+++ b/tests/test_resources/backends/systems/system1/system_artifact/empty.txt
diff --git a/tests/test_resources/hello_world.json b/tests/test_resources/hello_world.json
index 99e9439..28d7bd9 100644
--- a/tests/test_resources/hello_world.json
+++ b/tests/test_resources/hello_world.json
@@ -1,9 +1,9 @@
[
{
"name": "Hello world",
- "description": "Dummy application that displays 'Hello world!'",
+ "description": "Sample application that displays 'Hello world!'",
"supported_systems": [
- "Dummy System"
+ "Sample System"
],
"deploy_data": [
[
@@ -29,21 +29,21 @@
{
"name": "--choice-param",
"values": [
- "dummy_value_1",
- "dummy_value_2"
+ "value_1",
+ "value_2"
],
- "default_value": "dummy_value_1",
+ "default_value": "value_1",
"description": "Choice param"
},
{
"name": "--open-param",
"values": [],
- "default_value": "dummy_value_4",
+ "default_value": "value_4",
"description": "Open param"
},
{
"name": "--enable-flag",
- "default_value": "dummy_value_4",
+ "default_value": "value_4",
"description": "Flag param"
}
],