author     Dmitrii Agibov <dmitrii.agibov@arm.com>  2022-10-07 11:39:37 +0100
committer  Dmitrii Agibov <dmitrii.agibov@arm.com>  2022-10-07 11:40:21 +0100
commit     3083f7ee68ce08147db08fca2474e5f4712fc8d7 (patch)
tree       c52e668c01a6a1041c08190e52a15944fd65b453
parent     bb7fb49484bb3687041061b2fdbbfae3959be54b (diff)
download   mlia-3083f7ee68ce08147db08fca2474e5f4712fc8d7.tar.gz
MLIA-607 Update documentation and comments
Use "TensorFlow Lite" instead of "TFLite" in documentation and comments Change-Id: Ie4450d72fb2e5261d152d72ab8bd94c3da914c46
-rw-r--r--  README.md | 8
-rw-r--r--  RELEASES.md | 4
-rw-r--r--  src/mlia/cli/commands.py | 14
-rw-r--r--  src/mlia/cli/options.py | 10
-rw-r--r--  src/mlia/devices/ethosu/advice_generation.py | 2
-rw-r--r--  src/mlia/devices/ethosu/advisor.py | 4
-rw-r--r--  src/mlia/nn/tensorflow/config.py | 16
-rw-r--r--  src/mlia/nn/tensorflow/tflite_metrics.py | 10
-rw-r--r--  src/mlia/nn/tensorflow/utils.py | 10
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json | 4
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json | 4
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json | 2
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json | 2
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json | 2
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json | 2
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U65-Dedicated_Sram-TA/backend-config.json | 2
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json | 2
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json | 2
-rw-r--r--  src/mlia/tools/vela_wrapper.py | 4
-rw-r--r--  tests/conftest.py | 8
-rw-r--r--  tests/test_devices_ethosu_advice_generation.py | 4
-rw-r--r--  tests/test_nn_tensorflow_config.py | 11
-rw-r--r--  tests/test_nn_tensorflow_tflite_metrics.py | 6
-rw-r--r--  tests/test_nn_tensorflow_utils.py | 4
24 files changed, 70 insertions, 67 deletions
diff --git a/README.md b/README.md
index 28473d8..7ceefb3 100644
--- a/README.md
+++ b/README.md
@@ -190,9 +190,9 @@ mlia operators --target-profile ethos-u55-256 ~/models/mobilenet_v1_1.0_224_quan
#### *Arguments*
-##### TFLite model options
+##### TensorFlow Lite model options
-* model: Input model in TFLite format [required].
+* model: Input model in TensorFlow Lite format [required].
##### Target profile options
@@ -234,9 +234,9 @@ mlia performance ~/models/mobilenet_v1_1.0_224_quant.tflite \
#### *Arguments*
-##### TFLite model options
+##### TensorFlow Lite model options
-* model: Input model in TFLite format [required].
+* model: Input model in TensorFlow Lite format [required].
##### Target profile options
diff --git a/RELEASES.md b/RELEASES.md
index 174d6cd..691772a 100644
--- a/RELEASES.md
+++ b/RELEASES.md
@@ -29,8 +29,8 @@ TensorFlow™ is a trademark of Google® LLC.
### Issues fixed
-* Fix the issue that no performance information is shown for TFLite files when
- the mode 'all_tests' is used (MLIA-552)
+* Fix the issue that no performance information is shown for
+ TensorFlow Lite files when the mode 'all_tests' is used (MLIA-552)
* Specify cache arena size in the Vela memory profiles (MLIA-316)
### Internal changes
diff --git a/src/mlia/cli/commands.py b/src/mlia/cli/commands.py
index 5dd39f9..e044e1a 100644
--- a/src/mlia/cli/commands.py
+++ b/src/mlia/cli/commands.py
@@ -50,7 +50,7 @@ def all_tests(
This command runs a series of tests in order to generate a
comprehensive report/advice:
- - converts the input Keras model into TFLite format
+ - converts the input Keras model into TensorFlow Lite format
- checks the model for operator compatibility on the specified device
- applies optimizations to the model and estimates the resulting performance
on both the original and the optimized models
@@ -112,14 +112,14 @@ def operators(
:param ctx: execution context
:param target_profile: target profile identifier. Will load appropriate parameters
from the profile.json file based on this argument.
- :param model: path to the model, which can be TFLite or Keras
+ :param model: path to the model, which can be TensorFlow Lite or Keras
:param output: path to the file where the report will be saved
:param supported_ops_report: if True then generates supported operators
report in current directory and exits
Example:
Run command for the target profile ethos-u55-256 and the provided
- TFLite model and print report on the standard output
+ TensorFlow Lite model and print report on the standard output
>>> from mlia.api import ExecutionContext
>>> from mlia.cli.logging import setup_logging
@@ -161,13 +161,13 @@ def performance(
:param ctx: execution context
:param target_profile: target profile identifier. Will load appropriate parameters
from the profile.json file based on this argument.
- :param model: path to the model, which can be TFLite or Keras
+ :param model: path to the model, which can be TensorFlow Lite or Keras
:param output: path to the file where the report will be saved
:param evaluate_on: list of the backends to use for evaluation
Example:
Run command for the target profile ethos-u55-256 and
- the provided TFLite model and print report on the standard output
+ the provided TensorFlow Lite model and print report on the standard output
>>> from mlia.api import ExecutionContext
>>> from mlia.cli.logging import setup_logging
@@ -205,7 +205,7 @@ def optimization(
:param ctx: execution context
:param target: target profile identifier. Will load appropriate parameters
from the profile.json file based on this argument.
- :param model: path to the TFLite model
+ :param model: path to the TensorFlow Lite model
:param optimization_type: list of the optimization techniques separated
by comma, e.g. 'pruning,clustering'
:param optimization_target: list of the corresponding targets for
@@ -217,7 +217,7 @@ def optimization(
Example:
Run command for the target profile ethos-u55-256 and
- the provided TFLite model and print report on the standard output
+ the provided TensorFlow Lite model and print report on the standard output
>>> from mlia.cli.logging import setup_logging
>>> setup_logging()
diff --git a/src/mlia/cli/options.py b/src/mlia/cli/options.py
index f7f95c0..e5e85f0 100644
--- a/src/mlia/cli/options.py
+++ b/src/mlia/cli/options.py
@@ -62,15 +62,17 @@ def add_multi_optimization_options(parser: argparse.ArgumentParser) -> None:
def add_optional_tflite_model_options(parser: argparse.ArgumentParser) -> None:
"""Add optional model specific options."""
- model_group = parser.add_argument_group("TFLite model options")
+ model_group = parser.add_argument_group("TensorFlow Lite model options")
# make model parameter optional
- model_group.add_argument("model", nargs="?", help="TFLite model (optional)")
+ model_group.add_argument(
+ "model", nargs="?", help="TensorFlow Lite model (optional)"
+ )
def add_tflite_model_options(parser: argparse.ArgumentParser) -> None:
"""Add model specific options."""
- model_group = parser.add_argument_group("TFLite model options")
- model_group.add_argument("model", help="TFLite model")
+ model_group = parser.add_argument_group("TensorFlow Lite model options")
+ model_group.add_argument("model", help="TensorFlow Lite model")
def add_output_options(parser: argparse.ArgumentParser) -> None:
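
Usage note: the two helpers above attach the same "TensorFlow Lite model options" argument group and differ only in whether the positional "model" argument is required. A minimal standalone sketch (the demo parser is illustrative, not the real CLI wiring):

    import argparse

    from mlia.cli.options import add_tflite_model_options

    # Standalone sketch: attach the required-model group to a throwaway parser.
    parser = argparse.ArgumentParser(prog="mlia-demo")
    add_tflite_model_options(parser)

    args = parser.parse_args(["sample.tflite"])
    print(args.model)  # -> sample.tflite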
diff --git a/src/mlia/devices/ethosu/advice_generation.py b/src/mlia/devices/ethosu/advice_generation.py
index dee1650..8a38d2c 100644
--- a/src/mlia/devices/ethosu/advice_generation.py
+++ b/src/mlia/devices/ethosu/advice_generation.py
@@ -196,7 +196,7 @@ class EthosUStaticAdviceProducer(ContextAwareAdviceProducer):
Advice(
[
"For better performance, make sure that all the operators "
- "of your final TFLite model are supported by the NPU.",
+ "of your final TensorFlow Lite model are supported by the NPU.",
]
+ self.context.action_resolver.operator_compatibility_details()
)
diff --git a/src/mlia/devices/ethosu/advisor.py b/src/mlia/devices/ethosu/advisor.py
index be58de7..2c25f6c 100644
--- a/src/mlia/devices/ethosu/advisor.py
+++ b/src/mlia/devices/ethosu/advisor.py
@@ -52,10 +52,10 @@ class EthosUInferenceAdvisor(DefaultInferenceAdvisor):
# Performance and optimization are mutually exclusive.
# Decide which one to use (taking into account the model format).
if is_tflite_model(model):
- # TFLite models do not support optimization (only performance)!
+ # TensorFlow Lite models do not support optimization (only performance)!
if context.advice_category == AdviceCategory.OPTIMIZATION:
raise Exception(
- "Command 'optimization' is not supported for TFLite files."
+ "Command 'optimization' is not supported for TensorFlow Lite files."
)
if AdviceCategory.PERFORMANCE in context.advice_category:
collectors.append(EthosUPerformance(model, device, backends))
diff --git a/src/mlia/nn/tensorflow/config.py b/src/mlia/nn/tensorflow/config.py
index 6ee32e7..03d1d0f 100644
--- a/src/mlia/nn/tensorflow/config.py
+++ b/src/mlia/nn/tensorflow/config.py
@@ -31,7 +31,7 @@ class ModelConfiguration:
def convert_to_tflite(
self, tflite_model_path: str | Path, quantized: bool = False
) -> TFLiteModel:
- """Convert model to TFLite format."""
+ """Convert model to TensorFlow Lite format."""
raise NotImplementedError()
def convert_to_keras(self, keras_model_path: str | Path) -> KerasModel:
@@ -52,8 +52,8 @@ class KerasModel(ModelConfiguration):
def convert_to_tflite(
self, tflite_model_path: str | Path, quantized: bool = False
) -> TFLiteModel:
- """Convert model to TFLite format."""
- logger.info("Converting Keras to TFLite ...")
+ """Convert model to TensorFlow Lite format."""
+ logger.info("Converting Keras to TensorFlow Lite ...")
converted_model = convert_to_tflite(self.get_keras_model(), quantized)
logger.info("Done\n")
@@ -71,7 +71,7 @@ class KerasModel(ModelConfiguration):
class TFLiteModel(ModelConfiguration): # pylint: disable=abstract-method
- """TFLite model configuration."""
+ """TensorFlow Lite model configuration."""
def input_details(self) -> list[dict]:
"""Get model's input details."""
@@ -81,7 +81,7 @@ class TFLiteModel(ModelConfiguration): # pylint: disable=abstract-method
def convert_to_tflite(
self, tflite_model_path: str | Path, quantized: bool = False
) -> TFLiteModel:
- """Convert model to TFLite format."""
+ """Convert model to TensorFlow Lite format."""
return self
@@ -94,7 +94,7 @@ class TfModel(ModelConfiguration): # pylint: disable=abstract-method
def convert_to_tflite(
self, tflite_model_path: str | Path, quantized: bool = False
) -> TFLiteModel:
- """Convert model to TFLite format."""
+ """Convert model to TensorFlow Lite format."""
converted_model = convert_tf_to_tflite(self.model_path, quantized)
save_tflite_model(converted_model, tflite_model_path)
@@ -114,12 +114,12 @@ def get_model(model: str | Path) -> ModelConfiguration:
raise Exception(
"The input model format is not supported"
- "(supported formats: TFLite, Keras, TensorFlow saved model)!"
+ "(supported formats: TensorFlow Lite, Keras, TensorFlow saved model)!"
)
def get_tflite_model(model: str | Path, ctx: Context) -> TFLiteModel:
- """Convert input model to TFLite and returns TFLiteModel object."""
+ """Convert input model to TensorFlow Lite and returns TFLiteModel object."""
tflite_model_path = ctx.get_model_path("converted_model.tflite")
converted_model = get_model(model)
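
Taken together, the config.py changes keep a single conversion entry point per model type. A hedged sketch of that flow, using only names visible in this diff ("sample_model.h5" is a placeholder input):

    from mlia.nn.tensorflow.config import get_model

    # get_model() returns a KerasModel, TfModel or TFLiteModel configuration;
    # each one exposes convert_to_tflite(tflite_model_path, quantized=False).
    model_config = get_model("sample_model.h5")  # placeholder Keras file
    tflite_model = model_config.convert_to_tflite("converted_model.tflite")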
diff --git a/src/mlia/nn/tensorflow/tflite_metrics.py b/src/mlia/nn/tensorflow/tflite_metrics.py
index 0af7500..d7ae2a4 100644
--- a/src/mlia/nn/tensorflow/tflite_metrics.py
+++ b/src/mlia/nn/tensorflow/tflite_metrics.py
@@ -1,7 +1,7 @@
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""
-Contains class TFLiteMetrics to calculate metrics from a TFLite file.
+Contains class TFLiteMetrics to calculate metrics from a TensorFlow Lite file.
These metrics include:
* Sparsity (per layer and overall)
@@ -102,7 +102,7 @@ class ReportClusterMode(Enum):
class TFLiteMetrics:
- """Helper class to calculate metrics from a TFLite file.
+ """Helper class to calculate metrics from a TensorFlow Lite file.
Metrics include:
* sparsity (per-layer and overall)
@@ -111,12 +111,12 @@ class TFLiteMetrics:
"""
def __init__(self, tflite_file: str, ignore_list: list[str] | None = None) -> None:
- """Load the TFLite file and filter layers."""
+ """Load the TensorFlow Lite file and filter layers."""
self.tflite_file = tflite_file
if ignore_list is None:
ignore_list = DEFAULT_IGNORE_LIST
self.ignore_list = [ignore.casefold() for ignore in ignore_list]
- # Initialize the TFLite interpreter with the model file
+ # Initialize the TensorFlow Lite interpreter with the model file
self.interpreter = tf.lite.Interpreter(
model_path=tflite_file, experimental_preserve_all_tensors=True
)
@@ -218,7 +218,7 @@ class TFLiteMetrics:
"""Print a summary of all the model information."""
print(f"Model file: {self.tflite_file}")
print("#" * 80)
- print(" " * 28 + "### TFLITE SUMMARY ###")
+ print(" " * 28 + "### TENSORFLOW LITE SUMMARY ###")
print(f"File: {os.path.abspath(self.tflite_file)}")
print("Input(s):")
self._print_in_outs(self.interpreter.get_input_details(), verbose)
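
For reference, a small hedged usage example for TFLiteMetrics as renamed above; "my_model.tflite" is a placeholder, and sparsity_per_layer() is the accessor exercised by the tests further down:

    from mlia.nn.tensorflow.tflite_metrics import TFLiteMetrics

    # Load a TensorFlow Lite file and report per-layer sparsity.
    metrics = TFLiteMetrics("my_model.tflite")  # placeholder path
    for name, sparsity in metrics.sparsity_per_layer().items():
        print(f"{name}: {sparsity:.2%}")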
diff --git a/src/mlia/nn/tensorflow/utils.py b/src/mlia/nn/tensorflow/utils.py
index 6250f56..7970329 100644
--- a/src/mlia/nn/tensorflow/utils.py
+++ b/src/mlia/nn/tensorflow/utils.py
@@ -63,7 +63,7 @@ def representative_tf_dataset(model: str) -> Callable:
def convert_to_tflite(model: tf.keras.Model, quantized: bool = False) -> Interpreter:
- """Convert Keras model to TFLite."""
+ """Convert Keras model to TensorFlow Lite."""
if not isinstance(model, tf.keras.Model):
raise Exception("Invalid model type")
@@ -83,7 +83,7 @@ def convert_to_tflite(model: tf.keras.Model, quantized: bool = False) -> Interpr
def convert_tf_to_tflite(model: str, quantized: bool = False) -> Interpreter:
- """Convert TensorFlow model to TFLite."""
+ """Convert TensorFlow model to TensorFlow Lite."""
if not isinstance(model, str):
raise Exception("Invalid model type")
@@ -109,15 +109,15 @@ def save_keras_model(model: tf.keras.Model, save_path: str | Path) -> None:
def save_tflite_model(model: tf.lite.TFLiteConverter, save_path: str | Path) -> None:
- """Save TFLite model at provided path."""
+ """Save TensorFlow Lite model at provided path."""
with open(save_path, "wb") as file:
file.write(model)
def is_tflite_model(model: str | Path) -> bool:
- """Check if model type is supported by TFLite API.
+ """Check if model type is supported by TensorFlow Lite API.
- TFLite model is indicated by the model file extension .tflite
+    A TensorFlow Lite model is indicated by the file extension .tflite.
"""
model_path = Path(model)
return model_path.suffix == ".tflite"
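
The utils.py helpers wrap the standard TensorFlow Lite converter API, the same one the test fixture in tests/test_nn_tensorflow_tflite_metrics.py below uses directly. A self-contained sketch of that convert-and-save path (the one-layer model is a stand-in):

    import tensorflow as tf

    # Stand-in Keras model; convert_to_tflite() accepts any tf.keras.Model.
    model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])

    converter = tf.lite.TFLiteConverter.from_keras_model(model)
    tflite_bytes = converter.convert()

    # save_tflite_model() above performs essentially this write.
    with open("model.tflite", "wb") as file:
        file.write(tflite_bytes)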
diff --git a/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json b/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json
index 5c44ebc..7bc12c7 100644
--- a/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json
+++ b/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json
@@ -16,7 +16,7 @@
"run": [
{
"name": "--data",
- "description": "Full file name for a custom model. Model must be in TFLite format compiled with Vela.",
+ "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
"values": [],
"alias": "input_file"
},
@@ -52,7 +52,7 @@
"run": [
{
"name": "--data",
- "description": "Full file name for a custom model. Model must be in TFLite format compiled with Vela.",
+ "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
"values": [],
"alias": "input_file"
},
diff --git a/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json b/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json
index 41d2fd0..c27c6f5 100644
--- a/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json
+++ b/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json
@@ -16,7 +16,7 @@
"run": [
{
"name": "--data",
- "description": "Full file name for a custom model. Model must be in TFLite format compiled with Vela.",
+ "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
"values": [],
"alias": "input_file"
},
@@ -52,7 +52,7 @@
"run": [
{
"name": "--data",
- "description": "Full file name for a custom model. Model must be in TFLite format compiled with Vela.",
+ "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
"values": [],
"alias": "input_file"
},
diff --git a/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json b/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json
index 3ea9a6a..bbcadfd 100644
--- a/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json
+++ b/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json
@@ -16,7 +16,7 @@
"run": [
{
"name": "--data",
- "description": "Full file name for a custom model. Model must be in TFLite format compiled with Vela.",
+ "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
"values": [],
"alias": "input_file"
},
diff --git a/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json b/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json
index d043a2d..e0b1d1d 100644
--- a/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json
+++ b/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json
@@ -16,7 +16,7 @@
"run": [
{
"name": "--data",
- "description": "Full file name for a custom model. Model must be in TFLite format compiled with Vela.",
+ "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
"values": [],
"alias": "input_file"
},
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json
index 7ee5e00..4856d27 100644
--- a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json
+++ b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json
@@ -1,7 +1,7 @@
[
{
"name": "Generic Inference Runner: Ethos-U55/65 Shared SRAM",
- "description": "This application allows running inferences using custom NN TFLite models on Ethos-U. No data pre-/post-processing is executed.",
+ "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
"supported_systems": [
{
"name": "Corstone-300: Cortex-M55+Ethos-U55"
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json
index 51ff429..3b512ff 100644
--- a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json
+++ b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json
@@ -1,7 +1,7 @@
[
{
"name": "Generic Inference Runner: Ethos-U55 SRAM",
- "description": "This application allows running inferences using custom NN TFLite models on Ethos-U. No data pre-/post-processing is executed.",
+ "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
"supported_systems": [
{
"name": "Corstone-300: Cortex-M55+Ethos-U55"
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U65-Dedicated_Sram-TA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U65-Dedicated_Sram-TA/backend-config.json
index b59c85e..fc0569f 100644
--- a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U65-Dedicated_Sram-TA/backend-config.json
+++ b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U65-Dedicated_Sram-TA/backend-config.json
@@ -1,7 +1,7 @@
[
{
"name": "Generic Inference Runner: Ethos-U65 Dedicated SRAM",
- "description": "This application allows running inferences using custom NN TFLite models on Ethos-U. No data pre-/post-processing is executed.",
+ "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
"supported_systems": [
{
"name": "Corstone-300: Cortex-M55+Ethos-U65"
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json
index 69c5e60..0bdbd27 100644
--- a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json
+++ b/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json
@@ -1,7 +1,7 @@
[
{
"name": "Generic Inference Runner: Ethos-U55/65 Shared SRAM",
- "description": "This application allows running inferences using custom NN TFLite models on Ethos-U. No data pre-/post-processing is executed.",
+ "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
"supported_systems": [
{
"name": "Corstone-310: Cortex-M85+Ethos-U55"
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json
index fbe4a16..a56e49d 100644
--- a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json
+++ b/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json
@@ -1,7 +1,7 @@
[
{
"name": "Generic Inference Runner: Ethos-U55 SRAM",
- "description": "This application allows running inferences using custom NN TFLite models on Ethos-U. No data pre-/post-processing is executed.",
+ "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
"supported_systems": [
{
"name": "Corstone-310: Cortex-M85+Ethos-U55"
diff --git a/src/mlia/tools/vela_wrapper.py b/src/mlia/tools/vela_wrapper.py
index 47c15e9..00d2f2c 100644
--- a/src/mlia/tools/vela_wrapper.py
+++ b/src/mlia/tools/vela_wrapper.py
@@ -275,7 +275,7 @@ class VelaCompiler: # pylint: disable=too-many-instance-attributes
@staticmethod
def _read_model(model: str | Path) -> tuple[Graph, NetworkType]:
- """Read TFLite model."""
+ """Read TensorFlow Lite model."""
try:
model_path = str(model) if isinstance(model, Path) else model
@@ -448,7 +448,7 @@ def run_on_npu(operator: Op) -> NpuSupported:
a particular operator is supported to run on NPU.
There are two groups of checks:
- - general TFLite constraints
+ - general TensorFlow Lite constraints
- operator specific constraints
If an operator is not supported on NPU then this function
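
The docstring describes a two-stage check: general TensorFlow Lite constraints first, then operator-specific ones. A purely illustrative, self-contained sketch of that shape; none of the names below belong to the actual vela_wrapper API:

    from dataclasses import dataclass, field

    @dataclass
    class SupportResult:  # placeholder for the real NpuSupported result type
        supported: bool
        reasons: list = field(default_factory=list)

    def check_npu_support(op, general_checks, op_specific_checks):
        """Apply general constraints first, then operator-specific ones."""
        for group in (general_checks, op_specific_checks):
            reasons = [msg for check, msg in group if not check(op)]
            if reasons:  # the first failing group decides the result
                return SupportResult(False, reasons)
        return SupportResult(True)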
diff --git a/tests/conftest.py b/tests/conftest.py
index 1cb3dcd..b1f32dc 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -176,23 +176,23 @@ def fixture_test_keras_model(test_models_path: Path) -> Path:
@pytest.fixture(scope="session", name="test_tflite_model")
def fixture_test_tflite_model(test_models_path: Path) -> Path:
- """Return test TFLite model."""
+ """Return test TensorFlow Lite model."""
return test_models_path / "test_model.tflite"
@pytest.fixture(scope="session", name="test_tflite_vela_model")
def fixture_test_tflite_vela_model(test_models_path: Path) -> Path:
- """Return test Vela-optimized TFLite model."""
+ """Return test Vela-optimized TensorFlow Lite model."""
return test_models_path / "test_model_vela.tflite"
@pytest.fixture(scope="session", name="test_tf_model")
def fixture_test_tf_model(test_models_path: Path) -> Path:
- """Return test TFLite model."""
+ """Return test TensorFlow Lite model."""
return test_models_path / "tf_model_test_model"
@pytest.fixture(scope="session", name="test_tflite_invalid_model")
def fixture_test_tflite_invalid_model(test_models_path: Path) -> Path:
- """Return test invalid TFLite model."""
+ """Return test invalid TensorFlow Lite model."""
return test_models_path / "invalid.tflite"
diff --git a/tests/test_devices_ethosu_advice_generation.py b/tests/test_devices_ethosu_advice_generation.py
index 5a49089..21a3667 100644
--- a/tests/test_devices_ethosu_advice_generation.py
+++ b/tests/test_devices_ethosu_advice_generation.py
@@ -444,7 +444,7 @@ def test_ethosu_advice_producer(
Advice(
[
"For better performance, make sure that all the operators "
- "of your final TFLite model are supported by the NPU.",
+ "of your final TensorFlow Lite model are supported by the NPU.",
]
)
],
@@ -456,7 +456,7 @@ def test_ethosu_advice_producer(
Advice(
[
"For better performance, make sure that all the operators "
- "of your final TFLite model are supported by the NPU.",
+ "of your final TensorFlow Lite model are supported by the NPU.",
"For more details, run: mlia operators --help",
]
)
diff --git a/tests/test_nn_tensorflow_config.py b/tests/test_nn_tensorflow_config.py
index 1ac9f97..1a6fbe3 100644
--- a/tests/test_nn_tensorflow_config.py
+++ b/tests/test_nn_tensorflow_config.py
@@ -14,7 +14,7 @@ from mlia.nn.tensorflow.config import TfModel
def test_convert_keras_to_tflite(tmp_path: Path, test_keras_model: Path) -> None:
- """Test Keras to TFLite conversion."""
+ """Test Keras to TensorFlow Lite conversion."""
keras_model = KerasModel(test_keras_model)
tflite_model_path = tmp_path / "test.tflite"
@@ -25,7 +25,7 @@ def test_convert_keras_to_tflite(tmp_path: Path, test_keras_model: Path) -> None
def test_convert_tf_to_tflite(tmp_path: Path, test_tf_model: Path) -> None:
- """Test TensorFlow saved model to TFLite conversion."""
+ """Test TensorFlow saved model to TensorFlow Lite conversion."""
tf_model = TfModel(test_tf_model)
tflite_model_path = tmp_path / "test.tflite"
@@ -47,7 +47,8 @@ def test_convert_tf_to_tflite(tmp_path: Path, test_tf_model: Path) -> None:
pytest.raises(
Exception,
match="The input model format is not supported"
- r"\(supported formats: TFLite, Keras, TensorFlow saved model\)!",
+ r"\(supported formats: TensorFlow Lite, Keras, "
+ r"TensorFlow saved model\)!",
),
),
],
@@ -55,7 +56,7 @@ def test_convert_tf_to_tflite(tmp_path: Path, test_tf_model: Path) -> None:
def test_get_model_file(
model_path: str, expected_type: type, expected_error: Any
) -> None:
- """Test TFLite model type."""
+ """Test TensorFlow Lite model type."""
with expected_error:
model = get_model(model_path)
assert isinstance(model, expected_type)
@@ -67,6 +68,6 @@ def test_get_model_file(
def test_get_model_dir(
test_models_path: Path, model_path: str, expected_type: type
) -> None:
- """Test TFLite model type."""
+ """Test TensorFlow Lite model type."""
model = get_model(str(test_models_path / model_path))
assert isinstance(model, expected_type)
diff --git a/tests/test_nn_tensorflow_tflite_metrics.py b/tests/test_nn_tensorflow_tflite_metrics.py
index ca4ab55..0e4c79c 100644
--- a/tests/test_nn_tensorflow_tflite_metrics.py
+++ b/tests/test_nn_tensorflow_tflite_metrics.py
@@ -53,7 +53,7 @@ def _sparse_binary_keras_model() -> tf.keras.Model:
@pytest.fixture(scope="class", name="tflite_file")
def fixture_tflite_file() -> Generator:
- """Generate temporary TFLite file for tests."""
+ """Generate temporary TensorFlow Lite file for tests."""
converter = tf.lite.TFLiteConverter.from_keras_model(_sparse_binary_keras_model())
tflite_model = converter.convert()
with tempfile.TemporaryDirectory() as tmp_dir:
@@ -64,7 +64,7 @@ def fixture_tflite_file() -> Generator:
@pytest.fixture(scope="function", name="metrics")
def fixture_metrics(tflite_file: str) -> TFLiteMetrics:
- """Generate metrics file for a given TFLite model."""
+ """Generate metrics file for a given TensorFlow Lite model."""
return TFLiteMetrics(tflite_file)
@@ -74,7 +74,7 @@ class TestTFLiteMetrics:
@staticmethod
def test_sparsity(metrics: TFLiteMetrics) -> None:
"""Test sparsity."""
- # Create new instance with a sample TFLite file
+ # Create new instance with a sample TensorFlow Lite file
# Check sparsity calculation
sparsity_per_layer = metrics.sparsity_per_layer()
for name, sparsity in sparsity_per_layer.items():
diff --git a/tests/test_nn_tensorflow_utils.py b/tests/test_nn_tensorflow_utils.py
index 6d27299..199c7db 100644
--- a/tests/test_nn_tensorflow_utils.py
+++ b/tests/test_nn_tensorflow_utils.py
@@ -15,7 +15,7 @@ from mlia.nn.tensorflow.utils import save_tflite_model
def test_convert_to_tflite(test_keras_model: Path) -> None:
- """Test converting Keras model to TFLite."""
+ """Test converting Keras model to TensorFlow Lite."""
keras_model = tf.keras.models.load_model(str(test_keras_model))
tflite_model = convert_to_tflite(keras_model)
@@ -34,7 +34,7 @@ def test_save_keras_model(tmp_path: Path, test_keras_model: Path) -> None:
def test_save_tflite_model(tmp_path: Path, test_keras_model: Path) -> None:
- """Test saving TFLite model."""
+ """Test saving TensorFlow Lite model."""
keras_model = tf.keras.models.load_model(str(test_keras_model))
tflite_model = convert_to_tflite(keras_model)