diff options
author | Benjamin Klimczak <benjamin.klimczak@arm.com> | 2022-07-11 12:33:42 +0100 |
---|---|---|
committer | Benjamin Klimczak <benjamin.klimczak@arm.com> | 2022-07-26 14:08:21 +0100 |
commit | 5d81f37de09efe10f90512e50252be9c36925fcf (patch) | |
tree | b4d7cdfd051da0a6e882bdfcf280fd7ca7b39e57 /tests/test_nn_tensorflow_optimizations_select.py | |
parent | 7899b908c1fe6d86b92a80f3827ddd0ac05b674b (diff) | |
download | mlia-5d81f37de09efe10f90512e50252be9c36925fcf.tar.gz |
MLIA-551 Rework remains of AIET architecture
Re-factoring the code base to further merge the old AIET code into MLIA.
- Remove last traces of the backend type 'tool'
- Controlled systems removed, including SSH protocol, controller,
RunningCommand, locks etc.
- Build command / build dir and deploy functionality removed from
Applications and Systems
- Moving working_dir()
- Replace module 'output_parser' with new module 'output_consumer' and
merge Base64 parsing into it
- Change the output consumption to optionally remove (i.e. actually
consume) lines
- Use Base64 parsing in GenericInferenceOutputParser, replacing the
regex-based parsing and remove the now unused regex parsing
- Remove AIET reporting
- Pre-install applications by moving them to src/mlia/resources/backends
- Rename aiet-config.json to backend-config.json
- Move tests from tests/mlia/ to tests/
- Adapt unit tests to code changes
- Dependencies removed: paramiko, filelock, psutil
- Fix bug in corstone.py: The wrong resource directory was used which
broke the functionality to download backends.
- Use f-string formatting.
- Use logging instead of print.
Change-Id: I768bc3bb6b2eda57d219ad01be4a8e0a74167d76
Diffstat (limited to 'tests/test_nn_tensorflow_optimizations_select.py')
-rw-r--r-- | tests/test_nn_tensorflow_optimizations_select.py | 240 |
1 files changed, 240 insertions, 0 deletions
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Tests for module select.

Covers:
- get_optimizer: mapping optimization configurations / settings to the
  correct optimizer type (Pruner, Clusterer, MultiStageOptimizer) and
  rejecting invalid configurations with clear error messages.
- OptimizationSettings.create_from: building settings from parsed
  (type, target) parameter tuples.
- OptimizationSettings.next_target: stepping a setting towards the next
  (more aggressive) optimization target.
"""
from contextlib import ExitStack as does_not_raise
from pathlib import Path
from typing import Any
from typing import List
from typing import Tuple

import pytest
import tensorflow as tf

from mlia.nn.tensorflow.optimizations.clustering import Clusterer
from mlia.nn.tensorflow.optimizations.clustering import ClusteringConfiguration
from mlia.nn.tensorflow.optimizations.pruning import Pruner
from mlia.nn.tensorflow.optimizations.pruning import PruningConfiguration
from mlia.nn.tensorflow.optimizations.select import get_optimizer
from mlia.nn.tensorflow.optimizations.select import MultiStageOptimizer
from mlia.nn.tensorflow.optimizations.select import OptimizationSettings


@pytest.mark.parametrize(
    "config, expected_error, expected_type, expected_config",
    [
        # Valid pruning request via OptimizationSettings.
        (
            OptimizationSettings(
                optimization_type="pruning",
                optimization_target=0.5,
                layers_to_optimize=None,
            ),
            does_not_raise(),
            Pruner,
            "pruning: 0.5",
        ),
        # Valid pruning request via a concrete PruningConfiguration.
        (
            PruningConfiguration(0.5),
            does_not_raise(),
            Pruner,
            "pruning: 0.5",
        ),
        # Valid clustering request via OptimizationSettings.
        (
            OptimizationSettings(
                optimization_type="clustering",
                optimization_target=32,
                layers_to_optimize=None,
            ),
            does_not_raise(),
            Clusterer,
            "clustering: 32",
        ),
        # Clustering target must be a positive integer, not a fraction.
        (
            OptimizationSettings(
                optimization_type="clustering",
                optimization_target=0.5,
                layers_to_optimize=None,
            ),
            pytest.raises(
                Exception,
                match="Optimization target should be a "
                "positive integer. "
                "Optimization target provided: 0.5",
            ),
            None,
            None,
        ),
        # Valid clustering request via a concrete ClusteringConfiguration.
        (
            ClusteringConfiguration(32),
            does_not_raise(),
            Clusterer,
            "clustering: 32",
        ),
        # Unknown optimization type is rejected.
        (
            OptimizationSettings(
                optimization_type="superoptimization",
                optimization_target="supertarget",  # type: ignore
                layers_to_optimize="all",  # type: ignore
            ),
            pytest.raises(
                Exception,
                match="Unsupported optimization type: superoptimization",
            ),
            None,
            None,
        ),
        # Empty optimization type is rejected.
        (
            OptimizationSettings(
                optimization_type="",
                optimization_target=0.5,
                layers_to_optimize=None,
            ),
            pytest.raises(
                Exception,
                match="Optimization type is not provided",
            ),
            None,
            None,
        ),
        # A configuration object of the wrong type is rejected.
        (
            "wrong_config",
            pytest.raises(
                Exception,
                match="Unknown optimization configuration wrong_config",
            ),
            None,
            None,
        ),
        # Missing optimization target is rejected.
        (
            OptimizationSettings(
                optimization_type="pruning",
                optimization_target=None,  # type: ignore
                layers_to_optimize=None,
            ),
            pytest.raises(
                Exception,
                match="Optimization target is not provided",
            ),
            None,
            None,
        ),
        # A list of settings yields a MultiStageOptimizer combining both.
        (
            [
                OptimizationSettings(
                    optimization_type="pruning",
                    optimization_target=0.5,
                    layers_to_optimize=None,
                ),
                OptimizationSettings(
                    optimization_type="clustering",
                    optimization_target=32,
                    layers_to_optimize=None,
                ),
            ],
            does_not_raise(),
            MultiStageOptimizer,
            "pruning: 0.5 - clustering: 32",
        ),
    ],
)
def test_get_optimizer(
    config: Any,
    expected_error: Any,
    expected_type: type,
    expected_config: str,
    test_keras_model: Path,
) -> None:
    """Test function get_optimizer.

    Loads the shared Keras test model fixture and checks that
    get_optimizer returns the expected optimizer type and reports the
    expected configuration string, or raises the expected error.
    """
    model = tf.keras.models.load_model(str(test_keras_model))

    with expected_error:
        optimizer = get_optimizer(model, config)
        assert isinstance(optimizer, expected_type)
        assert optimizer.optimization_config() == expected_config


@pytest.mark.parametrize(
    "params, expected_result",
    [
        # No parameters -> no settings.
        (
            [],
            [],
        ),
        # Single (type, target) pair.
        (
            [("pruning", 0.5)],
            [
                OptimizationSettings(
                    optimization_type="pruning",
                    optimization_target=0.5,
                    layers_to_optimize=None,
                )
            ],
        ),
        # Multiple pairs preserve order.
        (
            [("pruning", 0.5), ("clustering", 32)],
            [
                OptimizationSettings(
                    optimization_type="pruning",
                    optimization_target=0.5,
                    layers_to_optimize=None,
                ),
                OptimizationSettings(
                    optimization_type="clustering",
                    optimization_target=32,
                    layers_to_optimize=None,
                ),
            ],
        ),
    ],
)
def test_optimization_settings_create_from(
    params: List[Tuple[str, float]], expected_result: List[OptimizationSettings]
) -> None:
    """Test creating settings from parsed params."""
    assert OptimizationSettings.create_from(params) == expected_result


@pytest.mark.parametrize(
    "settings, expected_next_target, expected_error",
    [
        # Clustering halves the number of clusters...
        [
            OptimizationSettings("clustering", 32, None),
            OptimizationSettings("clustering", 16, None),
            does_not_raise(),
        ],
        # ...but does not go below the minimum of 4.
        [
            OptimizationSettings("clustering", 4, None),
            OptimizationSettings("clustering", 4, None),
            does_not_raise(),
        ],
        # Non-power-of-two targets also step down.
        [
            OptimizationSettings("clustering", 10, None),
            OptimizationSettings("clustering", 8, None),
            does_not_raise(),
        ],
        # Pruning increases the sparsity target...
        [
            OptimizationSettings("pruning", 0.5, None),
            OptimizationSettings("pruning", 0.6, None),
            does_not_raise(),
        ],
        # ...but saturates at the maximum of 0.9.
        [
            OptimizationSettings("pruning", 0.9, None),
            OptimizationSettings("pruning", 0.9, None),
            does_not_raise(),
        ],
        # Unknown optimization type raises.
        [
            OptimizationSettings("super_optimization", 42, None),
            None,
            pytest.raises(
                Exception, match="Unknown optimization type super_optimization"
            ),
        ],
    ],
)
def test_optimization_settings_next_target(
    settings: OptimizationSettings,
    expected_next_target: OptimizationSettings,
    expected_error: Any,
) -> None:
    """Test getting next optimization target."""
    with expected_error:
        assert settings.next_target() == expected_next_target