aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorNathan Bailey <nathan.bailey@arm.com>2024-03-20 08:13:39 +0000
committerNathan Bailey <nathan.bailey@arm.com>2024-03-28 07:17:32 +0000
commitf3f3ab451968350b8f6df2de7c60b2c2b9320b59 (patch)
tree05d56c8e41de9b32f8054019a21b78628151310d
parent5f063ae1cfbfa2568d2858af0a0ccaf192bb1e8d (diff)
downloadmlia-f3f3ab451968350b8f6df2de7c60b2c2b9320b59.tar.gz
feat: Update Vela version
Updates Vela version to 3.11.0 and TensorFlow version to 2.15.1.

Required Keras import to change: `from keras.api._v2 import keras` is needed
instead of calling `tf.keras`. Subsequently, `tf.keras.X` needed to change to
`keras.X`.

Resolves: MLIA-1107

Signed-off-by: Nathan Bailey <nathan.bailey@arm.com>
Change-Id: I53bcaa9cdad58b0e6c311c8c6490393d33cb18bc
-rw-r--r--setup.cfg4
-rw-r--r--src/mlia/nn/rewrite/core/rewrite.py9
-rw-r--r--src/mlia/nn/rewrite/core/train.py19
-rw-r--r--src/mlia/nn/rewrite/library/fc_layer.py14
-rw-r--r--src/mlia/nn/select.py8
-rw-r--r--src/mlia/nn/tensorflow/config.py6
-rw-r--r--src/mlia/nn/tensorflow/optimizations/clustering.py16
-rw-r--r--src/mlia/nn/tensorflow/optimizations/pruning.py31
-rw-r--r--src/mlia/nn/tensorflow/tflite_convert.py18
-rw-r--r--src/mlia/nn/tensorflow/utils.py5
-rw-r--r--tests/conftest.py19
-rw-r--r--tests/test_nn_rewrite_core_train.py15
-rw-r--r--tests/test_nn_select.py4
-rw-r--r--tests/test_nn_tensorflow_optimizations_clustering.py8
-rw-r--r--tests/test_nn_tensorflow_optimizations_pruning.py8
-rw-r--r--tests/test_nn_tensorflow_tflite_compat.py12
-rw-r--r--tests/test_nn_tensorflow_tflite_convert.py7
-rw-r--r--tests/test_nn_tensorflow_tflite_metrics.py21
-rw-r--r--tests/test_nn_tensorflow_utils.py9
-rw-r--r--tests/test_target_cortex_a_operators.py12
-rw-r--r--tests/utils/common.py10
21 files changed, 132 insertions, 123 deletions
diff --git a/setup.cfg b/setup.cfg
index 4b6c931..6917747 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -29,9 +29,9 @@ package_dir =
= src
packages = find_namespace:
install_requires =
- tensorflow~=2.14.1
+ tensorflow~=2.15.1
tensorflow-model-optimization~=0.7.5
- ethos-u-vela~=3.10.0
+ ethos-u-vela~=3.11.0
flaky~=3.7.0
requests~=2.31.0
rich~=13.7.0
diff --git a/src/mlia/nn/rewrite/core/rewrite.py b/src/mlia/nn/rewrite/core/rewrite.py
index 8658991..c7d13ba 100644
--- a/src/mlia/nn/rewrite/core/rewrite.py
+++ b/src/mlia/nn/rewrite/core/rewrite.py
@@ -12,7 +12,7 @@ from typing import Any
from typing import Callable
from typing import cast
-import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from mlia.core.errors import ConfigurationError
from mlia.core.reporting import Column
@@ -25,8 +25,9 @@ from mlia.nn.rewrite.core.train import TrainingParameters
from mlia.nn.tensorflow.config import TFLiteModel
from mlia.utils.registry import Registry
+
logger = logging.getLogger(__name__)
-RewriteCallable = Callable[[Any, Any], tf.keras.Model]
+RewriteCallable = Callable[[Any, Any], keras.Model]
class Rewrite:
@@ -37,7 +38,7 @@ class Rewrite:
self.name = name
self.function = rewrite_fn
- def __call__(self, input_shape: Any, output_shape: Any) -> tf.keras.Model:
+ def __call__(self, input_shape: Any, output_shape: Any) -> keras.Model:
"""Perform the rewrite operation using the configured function."""
try:
return self.function(input_shape, output_shape)
@@ -52,7 +53,7 @@ class DynamicallyLoadedRewrite(Rewrite):
def __init__(self, name: str, function_name: str):
"""Initialize."""
- def load_and_run(input_shape: Any, output_shape: Any) -> tf.keras.Model:
+ def load_and_run(input_shape: Any, output_shape: Any) -> keras.Model:
"""Load the function from a file dynamically."""
self.load_function(function_name)
return self.function(input_shape, output_shape)
diff --git a/src/mlia/nn/rewrite/core/train.py b/src/mlia/nn/rewrite/core/train.py
index 72b8f48..60c39ae 100644
--- a/src/mlia/nn/rewrite/core/train.py
+++ b/src/mlia/nn/rewrite/core/train.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2023-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Sequential trainer."""
# pylint: disable=too-many-locals
@@ -23,6 +23,7 @@ from typing import Literal
import numpy as np
import tensorflow as tf
import tensorflow_model_optimization as tfmot
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from numpy.random import Generator
from mlia.nn.rewrite.core.extract import extract
@@ -383,8 +384,8 @@ def train_in_dir(
model = replace_fn(input_shape, output_shape)
- optimizer = tf.keras.optimizers.Nadam(learning_rate=train_params.learning_rate)
- loss_fn = tf.keras.losses.MeanSquaredError()
+ optimizer = keras.optimizers.Nadam(learning_rate=train_params.learning_rate)
+ loss_fn = keras.losses.MeanSquaredError()
if model_is_quantized:
model = tfmot.quantization.keras.quantize_model(model)
model.compile(optimizer=optimizer, loss=loss_fn, metrics=["mae"])
@@ -403,7 +404,7 @@ def train_in_dir(
* (math.cos(math.pi * current_step / train_params.steps) + 1)
/ 2.0
)
- tf.keras.backend.set_value(optimizer.learning_rate, cd_learning_rate)
+ keras.backend.set_value(optimizer.learning_rate, cd_learning_rate)
def late_decay(
epoch_step: int, logs: Any # pylint: disable=unused-argument
@@ -414,16 +415,16 @@ def train_in_dir(
decay_length = train_params.steps // 5
decay_fraction = min(steps_remaining, decay_length) / decay_length
ld_learning_rate = train_params.learning_rate * decay_fraction
- tf.keras.backend.set_value(optimizer.learning_rate, ld_learning_rate)
+ keras.backend.set_value(optimizer.learning_rate, ld_learning_rate)
assert train_params.learning_rate_schedule in LEARNING_RATE_SCHEDULES, (
f'Learning rate schedule "{train_params.learning_rate_schedule}" '
f"not implemented - expected one of {LEARNING_RATE_SCHEDULES}."
)
if train_params.learning_rate_schedule == "cosine":
- callbacks = [tf.keras.callbacks.LambdaCallback(on_batch_begin=cosine_decay)]
+ callbacks = [keras.callbacks.LambdaCallback(on_batch_begin=cosine_decay)]
elif train_params.learning_rate_schedule == "late":
- callbacks = [tf.keras.callbacks.LambdaCallback(on_batch_begin=late_decay)]
+ callbacks = [keras.callbacks.LambdaCallback(on_batch_begin=late_decay)]
elif train_params.learning_rate_schedule == "constant":
callbacks = []
@@ -474,7 +475,7 @@ def train_in_dir(
def save_as_tflite(
- keras_model: tf.keras.Model,
+ keras_model: keras.Model,
filename: str,
input_name: str,
input_shape: list,
@@ -485,7 +486,7 @@ def save_as_tflite(
"""Save Keras model as TFLite file."""
@contextmanager
- def fixed_input(keras_model: tf.keras.Model, tmp_shape: list) -> GeneratorType:
+ def fixed_input(keras_model: keras.Model, tmp_shape: list) -> GeneratorType:
"""Fix the input shape of the Keras model temporarily.
This avoids artifacts during conversion to TensorFlow Lite.
diff --git a/src/mlia/nn/rewrite/library/fc_layer.py b/src/mlia/nn/rewrite/library/fc_layer.py
index 2480500..041ce85 100644
--- a/src/mlia/nn/rewrite/library/fc_layer.py
+++ b/src/mlia/nn/rewrite/library/fc_layer.py
@@ -1,18 +1,18 @@
-# SPDX-FileCopyrightText: Copyright 2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2023-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Example rewrite with one fully connected layer."""
from typing import Any
-import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
-def get_keras_model(input_shape: Any, output_shape: Any) -> tf.keras.Model:
+def get_keras_model(input_shape: Any, output_shape: Any) -> keras.Model:
"""Generate TensorFlow Lite model for rewrite."""
- model = tf.keras.Sequential(
+ model = keras.Sequential(
(
- tf.keras.layers.InputLayer(input_shape=input_shape),
- tf.keras.layers.Reshape([-1]),
- tf.keras.layers.Dense(output_shape),
+ keras.layers.InputLayer(input_shape=input_shape),
+ keras.layers.Reshape([-1]),
+ keras.layers.Dense(output_shape),
)
)
return model
diff --git a/src/mlia/nn/select.py b/src/mlia/nn/select.py
index 20950cc..81a614f 100644
--- a/src/mlia/nn/select.py
+++ b/src/mlia/nn/select.py
@@ -10,7 +10,7 @@ from typing import cast
from typing import List
from typing import NamedTuple
-import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from mlia.core.errors import ConfigurationError
from mlia.nn.common import Optimizer
@@ -91,7 +91,7 @@ class MultiStageOptimizer(Optimizer):
def __init__(
self,
- model: tf.keras.Model,
+ model: keras.Model,
optimizations: list[OptimizerConfiguration],
) -> None:
"""Init MultiStageOptimizer instance."""
@@ -115,7 +115,7 @@ class MultiStageOptimizer(Optimizer):
def get_optimizer(
- model: tf.keras.Model | KerasModel | TFLiteModel,
+ model: keras.Model | KerasModel | TFLiteModel,
config: OptimizerConfiguration | OptimizationSettings | list[OptimizationSettings],
training_parameters: list[dict | None] | None = None,
) -> Optimizer:
@@ -149,7 +149,7 @@ def get_optimizer(
def _get_optimizer(
- model: tf.keras.Model | Path,
+ model: keras.Model | Path,
optimization_settings: OptimizationSettings | list[OptimizationSettings],
training_parameters: list[dict | None] | None = None,
) -> Optimizer:
diff --git a/src/mlia/nn/tensorflow/config.py b/src/mlia/nn/tensorflow/config.py
index 44fbaef..c6fae1c 100644
--- a/src/mlia/nn/tensorflow/config.py
+++ b/src/mlia/nn/tensorflow/config.py
@@ -15,6 +15,7 @@ from typing import List
import numpy as np
import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from mlia.core.context import Context
from mlia.nn.tensorflow.optimizations.quantization import dequantize
@@ -30,6 +31,7 @@ from mlia.nn.tensorflow.utils import is_saved_model
from mlia.nn.tensorflow.utils import is_tflite_model
from mlia.utils.logging import log_action
+
logger = logging.getLogger(__name__)
@@ -57,10 +59,10 @@ class KerasModel(ModelConfiguration):
Supports all models supported by Keras API: saved model, H5, HDF5
"""
- def get_keras_model(self) -> tf.keras.Model:
+ def get_keras_model(self) -> keras.Model:
"""Return associated Keras model."""
try:
- keras_model = tf.keras.models.load_model(self.model_path)
+ keras_model = keras.models.load_model(self.model_path)
except OSError as err:
raise RuntimeError(
f"Unable to load model content in {self.model_path}. "
diff --git a/src/mlia/nn/tensorflow/optimizations/clustering.py b/src/mlia/nn/tensorflow/optimizations/clustering.py
index f9018b3..8e7c4a2 100644
--- a/src/mlia/nn/tensorflow/optimizations/clustering.py
+++ b/src/mlia/nn/tensorflow/optimizations/clustering.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""
Contains class Clusterer that clusters unique weights per layer to a specified number.
@@ -12,8 +12,8 @@ from __future__ import annotations
from dataclasses import dataclass
from typing import Any
-import tensorflow as tf
import tensorflow_model_optimization as tfmot
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from tensorflow_model_optimization.python.core.clustering.keras.experimental import ( # pylint: disable=no-name-in-module
cluster as experimental_cluster,
)
@@ -50,7 +50,7 @@ class Clusterer(Optimizer):
"""
def __init__(
- self, model: tf.keras.Model, optimizer_configuration: ClusteringConfiguration
+ self, model: keras.Model, optimizer_configuration: ClusteringConfiguration
):
"""Init Clusterer instance."""
self.model = model
@@ -69,8 +69,8 @@ class Clusterer(Optimizer):
}
def _apply_clustering_to_layer(
- self, layer: tf.keras.layers.Layer
- ) -> tf.keras.layers.Layer:
+ self, layer: keras.layers.Layer
+ ) -> keras.layers.Layer:
layers_to_optimize = self.optimizer_configuration.layers_to_optimize
assert layers_to_optimize, "List of the layers to optimize is empty"
@@ -81,7 +81,7 @@ class Clusterer(Optimizer):
return experimental_cluster.cluster_weights(layer, **clustering_params)
def _init_for_clustering(self) -> None:
- # Use `tf.keras.models.clone_model` to apply `apply_clustering_to_layer`
+ # Use `keras.models.clone_model` to apply `apply_clustering_to_layer`
# to the layers of the model
if not self.optimizer_configuration.layers_to_optimize:
clustering_params = self._setup_clustering_params()
@@ -89,7 +89,7 @@ class Clusterer(Optimizer):
self.model, **clustering_params
)
else:
- clustered_model = tf.keras.models.clone_model(
+ clustered_model = keras.models.clone_model(
self.model, clone_function=self._apply_clustering_to_layer
)
@@ -103,6 +103,6 @@ class Clusterer(Optimizer):
self._init_for_clustering()
self._strip_clustering()
- def get_model(self) -> tf.keras.Model:
+ def get_model(self) -> keras.Model:
"""Get model."""
return self.model
diff --git a/src/mlia/nn/tensorflow/optimizations/pruning.py b/src/mlia/nn/tensorflow/optimizations/pruning.py
index a30b301..866e209 100644
--- a/src/mlia/nn/tensorflow/optimizations/pruning.py
+++ b/src/mlia/nn/tensorflow/optimizations/pruning.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""
Contains class Pruner to prune a model to a specified sparsity.
@@ -15,8 +15,8 @@ from dataclasses import dataclass
from typing import Any
import numpy as np
-import tensorflow as tf
import tensorflow_model_optimization as tfmot
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from tensorflow_model_optimization.python.core.sparsity.keras import ( # pylint: disable=no-name-in-module
prune_registry,
)
@@ -27,6 +27,7 @@ from tensorflow_model_optimization.python.core.sparsity.keras import ( # pylint
from mlia.nn.common import Optimizer
from mlia.nn.common import OptimizerConfiguration
+
logger = logging.getLogger(__name__)
@@ -58,7 +59,7 @@ class PrunableLayerPolicy(tfmot.sparsity.keras.PruningPolicy):
are compatible with the pruning API, and that the model supports pruning.
"""
- def allow_pruning(self, layer: tf.keras.layers.Layer) -> Any:
+ def allow_pruning(self, layer: keras.layers.Layer) -> Any:
"""Allow pruning only for layers that are prunable.
Checks the PruneRegistry in TensorFlow Model Optimization Toolkit.
@@ -71,13 +72,13 @@ class PrunableLayerPolicy(tfmot.sparsity.keras.PruningPolicy):
return layer_is_supported
- def ensure_model_supports_pruning(self, model: tf.keras.Model) -> None:
+ def ensure_model_supports_pruning(self, model: keras.Model) -> None:
"""Ensure that the model contains only supported layers."""
# Check whether the model is a Keras model.
- if not isinstance(model, tf.keras.Model):
+ if not isinstance(model, keras.Model):
raise ValueError(
"Models that are not part of the \
- tf.keras.Model base class \
+ keras.Model base class \
are not supported currently."
)
@@ -99,7 +100,7 @@ class Pruner(Optimizer):
"""
def __init__(
- self, model: tf.keras.Model, optimizer_configuration: PruningConfiguration
+ self, model: keras.Model, optimizer_configuration: PruningConfiguration
):
"""Init Pruner instance."""
self.model = model
@@ -132,9 +133,7 @@ class Pruner(Optimizer):
),
}
- def _apply_pruning_to_layer(
- self, layer: tf.keras.layers.Layer
- ) -> tf.keras.layers.Layer:
+ def _apply_pruning_to_layer(self, layer: keras.layers.Layer) -> keras.layers.Layer:
layers_to_optimize = self.optimizer_configuration.layers_to_optimize
assert layers_to_optimize, "List of the layers to optimize is empty"
@@ -145,7 +144,7 @@ class Pruner(Optimizer):
return tfmot.sparsity.keras.prune_low_magnitude(layer, **pruning_params)
def _init_for_pruning(self) -> None:
- # Use `tf.keras.models.clone_model` to apply `apply_pruning_to_layer`
+ # Use `keras.models.clone_model` to apply `apply_pruning_to_layer`
# to the layers of the model
if not self.optimizer_configuration.layers_to_optimize:
pruning_params = self._setup_pruning_params()
@@ -153,14 +152,14 @@ class Pruner(Optimizer):
self.model, pruning_policy=PrunableLayerPolicy(), **pruning_params
)
else:
- prunable_model = tf.keras.models.clone_model(
+ prunable_model = keras.models.clone_model(
self.model, clone_function=self._apply_pruning_to_layer
)
self.model = prunable_model
def _train_pruning(self) -> None:
- loss_fn = tf.keras.losses.MeanAbsolutePercentageError()
+ loss_fn = keras.losses.MeanAbsolutePercentageError()
self.model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
# Model callbacks
@@ -183,8 +182,8 @@ class Pruner(Optimizer):
continue
for weight in layer.layer.get_prunable_weights():
- nonzero_weights = np.count_nonzero(tf.keras.backend.get_value(weight))
- all_weights = tf.keras.backend.get_value(weight).size
+ nonzero_weights = np.count_nonzero(keras.backend.get_value(weight))
+ all_weights = keras.backend.get_value(weight).size
# Types need to be ignored for this function call because
# np.testing.assert_approx_equal does not have type annotation while the
@@ -205,6 +204,6 @@ class Pruner(Optimizer):
self._assert_sparsity_reached()
self._strip_pruning()
- def get_model(self) -> tf.keras.Model:
+ def get_model(self) -> keras.Model:
"""Get model."""
return self.model
diff --git a/src/mlia/nn/tensorflow/tflite_convert.py b/src/mlia/nn/tensorflow/tflite_convert.py
index d3a833a..29839d6 100644
--- a/src/mlia/nn/tensorflow/tflite_convert.py
+++ b/src/mlia/nn/tensorflow/tflite_convert.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Support module to call TFLiteConverter."""
from __future__ import annotations
@@ -14,6 +14,7 @@ from typing import Iterable
import numpy as np
import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from mlia.nn.tensorflow.utils import get_tf_tensor_shape
from mlia.nn.tensorflow.utils import is_keras_model
@@ -23,6 +24,7 @@ from mlia.utils.logging import redirect_output
from mlia.utils.proc import Command
from mlia.utils.proc import command_output
+
logger = logging.getLogger(__name__)
@@ -40,21 +42,21 @@ def representative_dataset(
def get_tflite_converter(
- model: tf.keras.Model | str | Path, quantized: bool = False
+ model: keras.Model | str | Path, quantized: bool = False
) -> tf.lite.TFLiteConverter:
"""Configure TensorFlow Lite converter for the provided model."""
if isinstance(model, (str, Path)):
# converter's methods accept string as input parameter
model = str(model)
- if isinstance(model, tf.keras.Model):
+ if isinstance(model, keras.Model):
converter = tf.lite.TFLiteConverter.from_keras_model(model)
input_shape = model.input_shape
elif isinstance(model, str) and is_saved_model(model):
converter = tf.lite.TFLiteConverter.from_saved_model(model)
input_shape = get_tf_tensor_shape(model)
elif isinstance(model, str) and is_keras_model(model):
- keras_model = tf.keras.models.load_model(model)
+ keras_model = keras.models.load_model(model)
input_shape = keras_model.input_shape
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
else:
@@ -70,9 +72,7 @@ def get_tflite_converter(
return converter
-def convert_to_tflite_bytes(
- model: tf.keras.Model | str, quantized: bool = False
-) -> bytes:
+def convert_to_tflite_bytes(model: keras.Model | str, quantized: bool = False) -> bytes:
"""Convert Keras model to TensorFlow Lite."""
converter = get_tflite_converter(model, quantized)
@@ -83,7 +83,7 @@ def convert_to_tflite_bytes(
def _convert_to_tflite(
- model: tf.keras.Model | str,
+ model: keras.Model | str,
quantized: bool = False,
output_path: Path | None = None,
) -> bytes:
@@ -97,7 +97,7 @@ def _convert_to_tflite(
def convert_to_tflite(
- model: tf.keras.Model | str,
+ model: keras.Model | str,
quantized: bool = False,
output_path: Path | None = None,
input_path: Path | None = None,
diff --git a/src/mlia/nn/tensorflow/utils.py b/src/mlia/nn/tensorflow/utils.py
index 1612447..3ac5064 100644
--- a/src/mlia/nn/tensorflow/utils.py
+++ b/src/mlia/nn/tensorflow/utils.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2024, Arm Limited and/or its affiliates.
# SPDX-FileCopyrightText: Copyright The TensorFlow Authors. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Collection of useful functions for optimizations."""
@@ -8,6 +8,7 @@ from pathlib import Path
from typing import Any
import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
def get_tf_tensor_shape(model: str) -> list:
@@ -30,7 +31,7 @@ def get_tf_tensor_shape(model: str) -> list:
def save_keras_model(
- model: tf.keras.Model, save_path: str | Path, include_optimizer: bool = True
+ model: keras.Model, save_path: str | Path, include_optimizer: bool = True
) -> None:
"""Save Keras model at provided path."""
model.save(save_path, include_optimizer=include_optimizer)
diff --git a/tests/conftest.py b/tests/conftest.py
index 53bfb0c..3d0b832 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -11,6 +11,7 @@ import _pytest
import numpy as np
import pytest
import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from mlia.backend.vela.compiler import compile_model
from mlia.core.context import ExecutionContext
@@ -103,21 +104,21 @@ def fixture_vela_ini_file(
return test_vela_path / "vela.ini"
-def get_test_keras_model() -> tf.keras.Model:
+def get_test_keras_model() -> keras.Model:
"""Return test Keras model."""
- model = tf.keras.Sequential(
+ model = keras.Sequential(
[
- tf.keras.Input(shape=(28, 28, 1), batch_size=1, name="input"),
- tf.keras.layers.Reshape((28, 28, 1)),
- tf.keras.layers.Conv2D(
+ keras.Input(shape=(28, 28, 1), batch_size=1, name="input"),
+ keras.layers.Reshape((28, 28, 1)),
+ keras.layers.Conv2D(
filters=12, kernel_size=(3, 3), activation="relu", name="conv1"
),
- tf.keras.layers.Conv2D(
+ keras.layers.Conv2D(
filters=12, kernel_size=(3, 3), activation="relu", name="conv2"
),
- tf.keras.layers.MaxPool2D(2, 2),
- tf.keras.layers.Flatten(),
- tf.keras.layers.Dense(10, name="output"),
+ keras.layers.MaxPool2D(2, 2),
+ keras.layers.Flatten(),
+ keras.layers.Dense(10, name="output"),
]
)
diff --git a/tests/test_nn_rewrite_core_train.py b/tests/test_nn_rewrite_core_train.py
index 7fb6f85..6d24133 100644
--- a/tests/test_nn_rewrite_core_train.py
+++ b/tests/test_nn_rewrite_core_train.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2023-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Tests for module mlia.nn.rewrite.core.train."""
# pylint: disable=too-many-arguments
@@ -12,6 +12,7 @@ from typing import Any
import numpy as np
import pytest
import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from mlia.nn.rewrite.core.train import augment_fn_twins
from mlia.nn.rewrite.core.train import AUGMENTATION_PRESETS
@@ -24,7 +25,7 @@ from tests.utils.rewrite import MockTrainingParameters
def replace_fully_connected_with_conv(
input_shape: Any, output_shape: Any
-) -> tf.keras.Model:
+) -> keras.Model:
"""Get a replacement model for the fully connected layer."""
for name, shape in {
"Input": input_shape,
@@ -33,11 +34,11 @@ def replace_fully_connected_with_conv(
if len(shape) != 1:
raise RuntimeError(f"{name}: shape (N,) expected, but it is {input_shape}.")
- model = tf.keras.Sequential(name="RewriteModel")
- model.add(tf.keras.Input(input_shape))
- model.add(tf.keras.layers.Reshape((1, 1, input_shape[0])))
- model.add(tf.keras.layers.Conv2D(filters=output_shape[0], kernel_size=(1, 1)))
- model.add(tf.keras.layers.Reshape(output_shape))
+ model = keras.Sequential(name="RewriteModel")
+ model.add(keras.Input(input_shape))
+ model.add(keras.layers.Reshape((1, 1, input_shape[0])))
+ model.add(keras.layers.Conv2D(filters=output_shape[0], kernel_size=(1, 1)))
+ model.add(keras.layers.Reshape(output_shape))
return model
diff --git a/tests/test_nn_select.py b/tests/test_nn_select.py
index 15abf2d..aac07b4 100644
--- a/tests/test_nn_select.py
+++ b/tests/test_nn_select.py
@@ -10,7 +10,7 @@ from typing import Any
from typing import cast
import pytest
-import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from mlia.core.errors import ConfigurationError
from mlia.nn.rewrite.core.rewrite import RewriteConfiguration
@@ -175,7 +175,7 @@ def test_get_optimizer(
) or isinstance(config, RewriteConfiguration):
model = test_tflite_model
else:
- model = tf.keras.models.load_model(str(test_keras_model))
+ model = keras.models.load_model(str(test_keras_model))
optimizer = get_optimizer(model, config)
assert isinstance(optimizer, expected_type)
assert optimizer.optimization_config() == expected_config
diff --git a/tests/test_nn_tensorflow_optimizations_clustering.py b/tests/test_nn_tensorflow_optimizations_clustering.py
index 72ade58..11036ad 100644
--- a/tests/test_nn_tensorflow_optimizations_clustering.py
+++ b/tests/test_nn_tensorflow_optimizations_clustering.py
@@ -7,8 +7,8 @@ import math
from pathlib import Path
import pytest
-import tensorflow as tf
from flaky import flaky
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from mlia.nn.tensorflow.optimizations.clustering import Clusterer
from mlia.nn.tensorflow.optimizations.clustering import ClusteringConfiguration
@@ -22,8 +22,8 @@ from tests.utils.common import train_model
def _prune_model(
- model: tf.keras.Model, target_sparsity: float, layers_to_prune: list[str] | None
-) -> tf.keras.Model:
+ model: keras.Model, target_sparsity: float, layers_to_prune: list[str] | None
+) -> keras.Model:
x_train, y_train = get_dataset()
batch_size = 1
num_epochs = 1
@@ -100,7 +100,7 @@ def test_cluster_simple_model_fully(
"""Simple MNIST test to see if clustering works correctly."""
target_sparsity = 0.5
- base_model = tf.keras.models.load_model(str(test_keras_model))
+ base_model = keras.models.load_model(str(test_keras_model))
train_model(base_model)
if sparsity_aware:
diff --git a/tests/test_nn_tensorflow_optimizations_pruning.py b/tests/test_nn_tensorflow_optimizations_pruning.py
index 9afc3ff..c942d83 100644
--- a/tests/test_nn_tensorflow_optimizations_pruning.py
+++ b/tests/test_nn_tensorflow_optimizations_pruning.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Test for module optimizations/pruning."""
from __future__ import annotations
@@ -6,7 +6,7 @@ from __future__ import annotations
from pathlib import Path
import pytest
-import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from numpy.core.numeric import isclose
from mlia.nn.tensorflow.optimizations.pruning import Pruner
@@ -47,7 +47,7 @@ def _test_check_sparsity(base_tflite_metrics: TFLiteMetrics) -> None:
def _get_tflite_metrics(
- path: Path, tflite_fn: str, model: tf.keras.Model
+ path: Path, tflite_fn: str, model: keras.Model
) -> TFLiteMetrics:
"""Save model as TFLiteModel and return metrics."""
temp_file = path / tflite_fn
@@ -70,7 +70,7 @@ def test_prune_simple_model_fully(
batch_size = 1
num_epochs = 1
- base_model = tf.keras.models.load_model(str(test_keras_model))
+ base_model = keras.models.load_model(str(test_keras_model))
train_model(base_model)
base_tflite_metrics = _get_tflite_metrics(
diff --git a/tests/test_nn_tensorflow_tflite_compat.py b/tests/test_nn_tensorflow_tflite_compat.py
index 4ca387c..ee60ff7 100644
--- a/tests/test_nn_tensorflow_tflite_compat.py
+++ b/tests/test_nn_tensorflow_tflite_compat.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Tests for tflite_compat module."""
from __future__ import annotations
@@ -6,7 +6,7 @@ from __future__ import annotations
from unittest.mock import MagicMock
import pytest
-import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from tensorflow.lite.python import convert
from mlia.nn.tensorflow.tflite_compat import converter_error_data_pb2
@@ -19,11 +19,11 @@ from mlia.nn.tensorflow.tflite_compat import TFLiteConversionErrorCode
def test_not_fully_compatible_model_flex_ops() -> None:
"""Test models that requires TF_SELECT_OPS."""
- model = tf.keras.models.Sequential(
+ model = keras.models.Sequential(
[
- tf.keras.layers.Dense(units=1, input_shape=[1], batch_size=1),
- tf.keras.layers.Dense(units=16, activation="softsign"),
- tf.keras.layers.Dense(units=1),
+ keras.layers.Dense(units=1, input_shape=[1], batch_size=1),
+ keras.layers.Dense(units=16, activation="softsign"),
+ keras.layers.Dense(units=1),
]
)
diff --git a/tests/test_nn_tensorflow_tflite_convert.py b/tests/test_nn_tensorflow_tflite_convert.py
index 3125c04..81655b1 100644
--- a/tests/test_nn_tensorflow_tflite_convert.py
+++ b/tests/test_nn_tensorflow_tflite_convert.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Test for module utils/test_utils."""
import os
@@ -9,6 +9,7 @@ from unittest.mock import MagicMock
import numpy as np
import pytest
import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from mlia.nn.tensorflow import tflite_convert
from mlia.nn.tensorflow.tflite_convert import convert_to_tflite
@@ -40,14 +41,14 @@ def test_convert_saved_model_to_tflite(test_tf_model: Path) -> None:
def test_convert_keras_to_tflite(test_keras_model: Path) -> None:
"""Test converting Keras model to TensorFlow Lite."""
- keras_model = tf.keras.models.load_model(str(test_keras_model))
+ keras_model = keras.models.load_model(str(test_keras_model))
result = convert_to_tflite_bytes(keras_model)
assert isinstance(result, bytes)
def test_save_tflite_model(tmp_path: Path, test_keras_model: Path) -> None:
"""Test saving TensorFlow Lite model."""
- keras_model = tf.keras.models.load_model(str(test_keras_model))
+ keras_model = keras.models.load_model(str(test_keras_model))
temp_file = tmp_path / "test_model_saving.tflite"
convert_to_tflite(keras_model, output_path=temp_file)
diff --git a/tests/test_nn_tensorflow_tflite_metrics.py b/tests/test_nn_tensorflow_tflite_metrics.py
index e8d7c09..cbb1b63 100644
--- a/tests/test_nn_tensorflow_tflite_metrics.py
+++ b/tests/test_nn_tensorflow_tflite_metrics.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Test for module utils/tflite_metrics."""
from __future__ import annotations
@@ -12,26 +12,27 @@ from typing import Generator
import numpy as np
import pytest
import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from mlia.nn.tensorflow.tflite_metrics import ReportClusterMode
from mlia.nn.tensorflow.tflite_metrics import TFLiteMetrics
-def _sample_keras_model() -> tf.keras.Model:
+def _sample_keras_model() -> keras.Model:
# Create a sample model
- keras_model = tf.keras.Sequential(
+ keras_model = keras.Sequential(
[
- tf.keras.Input(shape=(8, 8, 3)),
- tf.keras.layers.Conv2D(4, 3),
- tf.keras.layers.DepthwiseConv2D(3),
- tf.keras.layers.Flatten(),
- tf.keras.layers.Dense(8),
+ keras.Input(shape=(8, 8, 3)),
+ keras.layers.Conv2D(4, 3),
+ keras.layers.DepthwiseConv2D(3),
+ keras.layers.Flatten(),
+ keras.layers.Dense(8),
]
)
return keras_model
-def _sparse_binary_keras_model() -> tf.keras.Model:
+def _sparse_binary_keras_model() -> keras.Model:
def get_sparse_weights(shape: list[int]) -> np.ndarray:
weights = np.zeros(shape)
with np.nditer(weights, op_flags=[["writeonly"]]) as weight_it:
@@ -43,7 +44,7 @@ def _sparse_binary_keras_model() -> tf.keras.Model:
keras_model = _sample_keras_model()
# Assign weights to have 0.5 sparsity
for layer in keras_model.layers:
- if not isinstance(layer, tf.keras.layers.Flatten):
+ if not isinstance(layer, keras.layers.Flatten):
weight = layer.weights[0]
weight.assign(get_sparse_weights(weight.shape))
print(layer)
diff --git a/tests/test_nn_tensorflow_utils.py b/tests/test_nn_tensorflow_utils.py
index e356a49..4e0c1e1 100644
--- a/tests/test_nn_tensorflow_utils.py
+++ b/tests/test_nn_tensorflow_utils.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Test for module utils/test_utils."""
import re
@@ -7,6 +7,7 @@ from pathlib import Path
import numpy as np
import pytest
import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from mlia.nn.tensorflow.tflite_convert import convert_to_tflite
from mlia.nn.tensorflow.utils import check_tflite_datatypes
@@ -19,18 +20,18 @@ from mlia.nn.tensorflow.utils import save_keras_model
def test_save_keras_model(tmp_path: Path, test_keras_model: Path) -> None:
"""Test saving Keras model."""
- keras_model = tf.keras.models.load_model(str(test_keras_model))
+ keras_model = keras.models.load_model(str(test_keras_model))
temp_file = tmp_path / "test_model_saving.h5"
save_keras_model(keras_model, temp_file)
- loaded_model = tf.keras.models.load_model(temp_file)
+ loaded_model = keras.models.load_model(temp_file)
assert loaded_model.summary() == keras_model.summary()
def test_save_tflite_model(tmp_path: Path, test_keras_model: Path) -> None:
"""Test saving TensorFlow Lite model."""
- keras_model = tf.keras.models.load_model(str(test_keras_model))
+ keras_model = keras.models.load_model(str(test_keras_model))
temp_file = tmp_path / "test_model_saving.tflite"
convert_to_tflite(keras_model, output_path=temp_file)
diff --git a/tests/test_target_cortex_a_operators.py b/tests/test_target_cortex_a_operators.py
index 16cdca5..a4cfb2e 100644
--- a/tests/test_target_cortex_a_operators.py
+++ b/tests/test_target_cortex_a_operators.py
@@ -1,10 +1,10 @@
-# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Tests for Cortex-A operator compatibility."""
from pathlib import Path
import pytest
-import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
from mlia.nn.tensorflow.tflite_convert import convert_to_tflite_bytes
from mlia.target.cortex_a.config import CortexAConfiguration
@@ -42,13 +42,13 @@ def test_get_cortex_a_compatibility_info_not_compatible(
monkeypatch: pytest.MonkeyPatch,
) -> None:
"""Construct and test a NOT fully compatible TensorFlow Lite model."""
- keras_model = tf.keras.Sequential(
+ keras_model = keras.Sequential(
[
- tf.keras.Input(shape=(28, 28, 1), batch_size=1, name="input"),
- tf.keras.layers.Conv2D(
+ keras.Input(shape=(28, 28, 1), batch_size=1, name="input"),
+ keras.layers.Conv2D(
filters=12, kernel_size=(3, 3), activation="softmax", name="conv1"
),
- tf.keras.layers.LeakyReLU(),
+ keras.layers.LeakyReLU(),
]
)
keras_model.compile(optimizer="sgd", loss="mean_squared_error")
diff --git a/tests/utils/common.py b/tests/utils/common.py
index c29b47c..eafa31b 100644
--- a/tests/utils/common.py
+++ b/tests/utils/common.py
@@ -1,4 +1,4 @@
-# SPDX-FileCopyrightText: Copyright 2022-2023, Arm Limited and/or its affiliates.
+# SPDX-FileCopyrightText: Copyright 2022-2024, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""Common test utils module."""
from __future__ import annotations
@@ -6,12 +6,12 @@ from __future__ import annotations
from pathlib import Path
import numpy as np
-import tensorflow as tf
+from keras.api._v2 import keras # Temporary workaround for now: MLIA-1107
def get_dataset() -> tuple[np.ndarray, np.ndarray]:
"""Return sample dataset."""
- mnist = tf.keras.datasets.mnist
+ mnist = keras.datasets.mnist
(x_train, y_train), _ = mnist.load_data()
x_train = x_train / 255.0
@@ -22,11 +22,11 @@ def get_dataset() -> tuple[np.ndarray, np.ndarray]:
return x_train, y_train
-def train_model(model: tf.keras.Model) -> None:
+def train_model(model: keras.Model) -> None:
"""Train model using sample dataset."""
num_epochs = 1
- loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
+ loss_fn = keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer="adam", loss=loss_fn, metrics=["accuracy"])
x_train, y_train = get_dataset()