author     Dmitrii Agibov <dmitrii.agibov@arm.com>    2022-10-07 11:39:37 +0100
committer  Dmitrii Agibov <dmitrii.agibov@arm.com>    2022-10-07 11:40:21 +0100
commit     3083f7ee68ce08147db08fca2474e5f4712fc8d7 (patch)
tree       c52e668c01a6a1041c08190e52a15944fd65b453 /src
parent     bb7fb49484bb3687041061b2fdbbfae3959be54b (diff)
download   mlia-3083f7ee68ce08147db08fca2474e5f4712fc8d7.tar.gz
MLIA-607 Update documentation and comments
Use "TensorFlow Lite" instead of "TFLite" in documentation and comments Change-Id: Ie4450d72fb2e5261d152d72ab8bd94c3da914c46
Diffstat (limited to 'src')
-rw-r--r--  src/mlia/cli/commands.py | 14
-rw-r--r--  src/mlia/cli/options.py | 10
-rw-r--r--  src/mlia/devices/ethosu/advice_generation.py | 2
-rw-r--r--  src/mlia/devices/ethosu/advisor.py | 4
-rw-r--r--  src/mlia/nn/tensorflow/config.py | 16
-rw-r--r--  src/mlia/nn/tensorflow/tflite_metrics.py | 10
-rw-r--r--  src/mlia/nn/tensorflow/utils.py | 10
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json | 4
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json | 4
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json | 2
-rw-r--r--  src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json | 2
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json | 2
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json | 2
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U65-Dedicated_Sram-TA/backend-config.json | 2
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json | 2
-rw-r--r--  src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json | 2
-rw-r--r--  src/mlia/tools/vela_wrapper.py | 4
17 files changed, 47 insertions, 45 deletions
diff --git a/src/mlia/cli/commands.py b/src/mlia/cli/commands.py
index 5dd39f9..e044e1a 100644
--- a/src/mlia/cli/commands.py
+++ b/src/mlia/cli/commands.py
@@ -50,7 +50,7 @@ def all_tests(
This command runs a series of tests in order to generate a
comprehensive report/advice:
- - converts the input Keras model into TFLite format
+ - converts the input Keras model into TensorFlow Lite format
- checks the model for operator compatibility on the specified device
- applies optimizations to the model and estimates the resulting performance
on both the original and the optimized models
@@ -112,14 +112,14 @@ def operators(
:param ctx: execution context
:param target_profile: target profile identifier. Will load appropriate parameters
from the profile.json file based on this argument.
- :param model: path to the model, which can be TFLite or Keras
+ :param model: path to the model, which can be TensorFlow Lite or Keras
:param output: path to the file where the report will be saved
:param supported_ops_report: if True then generates supported operators
report in current directory and exits
Example:
Run command for the target profile ethos-u55-256 and the provided
- TFLite model and print report on the standard output
+ TensorFlow Lite model and print report on the standard output
>>> from mlia.api import ExecutionContext
>>> from mlia.cli.logging import setup_logging
@@ -161,13 +161,13 @@ def performance(
:param ctx: execution context
:param target_profile: target profile identifier. Will load appropriate parameters
from the profile.json file based on this argument.
- :param model: path to the model, which can be TFLite or Keras
+ :param model: path to the model, which can be TensorFlow Lite or Keras
:param output: path to the file where the report will be saved
:param evaluate_on: list of the backends to use for evaluation
Example:
Run command for the target profile ethos-u55-256 and
- the provided TFLite model and print report on the standard output
+ the provided TensorFlow Lite model and print report on the standard output
>>> from mlia.api import ExecutionContext
>>> from mlia.cli.logging import setup_logging
@@ -205,7 +205,7 @@ def optimization(
:param ctx: execution context
:param target: target profile identifier. Will load appropriate parameters
from the profile.json file based on this argument.
- :param model: path to the TFLite model
+ :param model: path to the TensorFlow Lite model
:param optimization_type: list of the optimization techniques separated
by comma, e.g. 'pruning,clustering'
:param optimization_target: list of the corresponding targets for
@@ -217,7 +217,7 @@ def optimization(
Example:
Run command for the target profile ethos-u55-256 and
- the provided TFLite model and print report on the standard output
+ the provided TensorFlow Lite model and print report on the standard output
>>> from mlia.cli.logging import setup_logging
>>> setup_logging()
diff --git a/src/mlia/cli/options.py b/src/mlia/cli/options.py
index f7f95c0..e5e85f0 100644
--- a/src/mlia/cli/options.py
+++ b/src/mlia/cli/options.py
@@ -62,15 +62,17 @@ def add_multi_optimization_options(parser: argparse.ArgumentParser) -> None:
def add_optional_tflite_model_options(parser: argparse.ArgumentParser) -> None:
"""Add optional model specific options."""
- model_group = parser.add_argument_group("TFLite model options")
+ model_group = parser.add_argument_group("TensorFlow Lite model options")
# make model parameter optional
- model_group.add_argument("model", nargs="?", help="TFLite model (optional)")
+ model_group.add_argument(
+ "model", nargs="?", help="TensorFlow Lite model (optional)"
+ )
def add_tflite_model_options(parser: argparse.ArgumentParser) -> None:
"""Add model specific options."""
- model_group = parser.add_argument_group("TFLite model options")
- model_group.add_argument("model", help="TFLite model")
+ model_group = parser.add_argument_group("TensorFlow Lite model options")
+ model_group.add_argument("model", help="TensorFlow Lite model")
def add_output_options(parser: argparse.ArgumentParser) -> None:
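
The options.py hunk above only renames the argument group and help text; the argparse pattern itself is unchanged. As a standalone illustration of that pattern (an argument group holding an optional positional model argument), here is a minimal sketch that assumes nothing beyond the standard-library argparse module; the group title and argument name mirror the diff, the prog name is a placeholder:

    import argparse

    def add_optional_tflite_model_options(parser: argparse.ArgumentParser) -> None:
        """Attach an optional TensorFlow Lite model argument to the parser."""
        model_group = parser.add_argument_group("TensorFlow Lite model options")
        # nargs="?" keeps the positional argument optional
        model_group.add_argument(
            "model", nargs="?", help="TensorFlow Lite model (optional)"
        )

    parser = argparse.ArgumentParser(prog="mlia-example")
    add_optional_tflite_model_options(parser)
    print(parser.parse_args([]).model)                  # None (no model given)
    print(parser.parse_args(["model.tflite"]).model)    # model.tflite
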
diff --git a/src/mlia/devices/ethosu/advice_generation.py b/src/mlia/devices/ethosu/advice_generation.py
index dee1650..8a38d2c 100644
--- a/src/mlia/devices/ethosu/advice_generation.py
+++ b/src/mlia/devices/ethosu/advice_generation.py
@@ -196,7 +196,7 @@ class EthosUStaticAdviceProducer(ContextAwareAdviceProducer):
Advice(
[
"For better performance, make sure that all the operators "
- "of your final TFLite model are supported by the NPU.",
+ "of your final TensorFlow Lite model are supported by the NPU.",
]
+ self.context.action_resolver.operator_compatibility_details()
)
diff --git a/src/mlia/devices/ethosu/advisor.py b/src/mlia/devices/ethosu/advisor.py
index be58de7..2c25f6c 100644
--- a/src/mlia/devices/ethosu/advisor.py
+++ b/src/mlia/devices/ethosu/advisor.py
@@ -52,10 +52,10 @@ class EthosUInferenceAdvisor(DefaultInferenceAdvisor):
# Performance and optimization are mutually exclusive.
# Decide which one to use (taking into account the model format).
if is_tflite_model(model):
- # TFLite models do not support optimization (only performance)!
+ # TensorFlow Lite models do not support optimization (only performance)!
if context.advice_category == AdviceCategory.OPTIMIZATION:
raise Exception(
- "Command 'optimization' is not supported for TFLite files."
+ "Command 'optimization' is not supported for TensorFlow Lite files."
)
if AdviceCategory.PERFORMANCE in context.advice_category:
collectors.append(EthosUPerformance(model, device, backends))
diff --git a/src/mlia/nn/tensorflow/config.py b/src/mlia/nn/tensorflow/config.py
index 6ee32e7..03d1d0f 100644
--- a/src/mlia/nn/tensorflow/config.py
+++ b/src/mlia/nn/tensorflow/config.py
@@ -31,7 +31,7 @@ class ModelConfiguration:
def convert_to_tflite(
self, tflite_model_path: str | Path, quantized: bool = False
) -> TFLiteModel:
- """Convert model to TFLite format."""
+ """Convert model to TensorFlow Lite format."""
raise NotImplementedError()
def convert_to_keras(self, keras_model_path: str | Path) -> KerasModel:
@@ -52,8 +52,8 @@ class KerasModel(ModelConfiguration):
def convert_to_tflite(
self, tflite_model_path: str | Path, quantized: bool = False
) -> TFLiteModel:
- """Convert model to TFLite format."""
- logger.info("Converting Keras to TFLite ...")
+ """Convert model to TensorFlow Lite format."""
+ logger.info("Converting Keras to TensorFlow Lite ...")
converted_model = convert_to_tflite(self.get_keras_model(), quantized)
logger.info("Done\n")
@@ -71,7 +71,7 @@ class KerasModel(ModelConfiguration):
class TFLiteModel(ModelConfiguration): # pylint: disable=abstract-method
- """TFLite model configuration."""
+ """TensorFlow Lite model configuration."""
def input_details(self) -> list[dict]:
"""Get model's input details."""
@@ -81,7 +81,7 @@ class TFLiteModel(ModelConfiguration): # pylint: disable=abstract-method
def convert_to_tflite(
self, tflite_model_path: str | Path, quantized: bool = False
) -> TFLiteModel:
- """Convert model to TFLite format."""
+ """Convert model to TensorFlow Lite format."""
return self
@@ -94,7 +94,7 @@ class TfModel(ModelConfiguration): # pylint: disable=abstract-method
def convert_to_tflite(
self, tflite_model_path: str | Path, quantized: bool = False
) -> TFLiteModel:
- """Convert model to TFLite format."""
+ """Convert model to TensorFlow Lite format."""
converted_model = convert_tf_to_tflite(self.model_path, quantized)
save_tflite_model(converted_model, tflite_model_path)
@@ -114,12 +114,12 @@ def get_model(model: str | Path) -> ModelConfiguration:
raise Exception(
"The input model format is not supported"
- "(supported formats: TFLite, Keras, TensorFlow saved model)!"
+ "(supported formats: TensorFlow Lite, Keras, TensorFlow saved model)!"
)
def get_tflite_model(model: str | Path, ctx: Context) -> TFLiteModel:
- """Convert input model to TFLite and returns TFLiteModel object."""
+ """Convert input model to TensorFlow Lite and returns TFLiteModel object."""
tflite_model_path = ctx.get_model_path("converted_model.tflite")
converted_model = get_model(model)
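
The config.py hunks above rename docstrings on the ModelConfiguration hierarchy (KerasModel, TFLiteModel, TfModel), each of which implements convert_to_tflite, while get_tflite_model routes any supported input to a TensorFlow Lite file. The body of get_model is not part of this diff, so the following is only a hypothetical sketch of the extension-based dispatch that the surrounding code implies; the helper name and the accepted suffixes are assumptions:

    from __future__ import annotations

    from pathlib import Path

    def guess_model_kind(model: str | Path) -> str:
        """Hypothetical dispatch by file type, mirroring the supported-formats message."""
        path = Path(model)
        if path.suffix == ".tflite":
            return "TFLiteModel"   # already in TensorFlow Lite format
        if path.suffix in (".h5", ".hdf5"):
            return "KerasModel"    # converted via the TensorFlow Lite converter
        if path.is_dir():
            return "TfModel"       # TensorFlow saved model directory
        raise Exception(
            "The input model format is not supported "
            "(supported formats: TensorFlow Lite, Keras, TensorFlow saved model)!"
        )
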
diff --git a/src/mlia/nn/tensorflow/tflite_metrics.py b/src/mlia/nn/tensorflow/tflite_metrics.py
index 0af7500..d7ae2a4 100644
--- a/src/mlia/nn/tensorflow/tflite_metrics.py
+++ b/src/mlia/nn/tensorflow/tflite_metrics.py
@@ -1,7 +1,7 @@
# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
# SPDX-License-Identifier: Apache-2.0
"""
-Contains class TFLiteMetrics to calculate metrics from a TFLite file.
+Contains class TFLiteMetrics to calculate metrics from a TensorFlow Lite file.
These metrics include:
* Sparsity (per layer and overall)
@@ -102,7 +102,7 @@ class ReportClusterMode(Enum):
class TFLiteMetrics:
- """Helper class to calculate metrics from a TFLite file.
+ """Helper class to calculate metrics from a TensorFlow Lite file.
Metrics include:
* sparsity (per-layer and overall)
@@ -111,12 +111,12 @@ class TFLiteMetrics:
"""
def __init__(self, tflite_file: str, ignore_list: list[str] | None = None) -> None:
- """Load the TFLite file and filter layers."""
+ """Load the TensorFlow Lite file and filter layers."""
self.tflite_file = tflite_file
if ignore_list is None:
ignore_list = DEFAULT_IGNORE_LIST
self.ignore_list = [ignore.casefold() for ignore in ignore_list]
- # Initialize the TFLite interpreter with the model file
+ # Initialize the TensorFlow Lite interpreter with the model file
self.interpreter = tf.lite.Interpreter(
model_path=tflite_file, experimental_preserve_all_tensors=True
)
@@ -218,7 +218,7 @@ class TFLiteMetrics:
"""Print a summary of all the model information."""
print(f"Model file: {self.tflite_file}")
print("#" * 80)
- print(" " * 28 + "### TFLITE SUMMARY ###")
+ print(" " * 28 + "### TENSORFLOW LITE SUMMARY ###")
print(f"File: {os.path.abspath(self.tflite_file)}")
print("Input(s):")
self._print_in_outs(self.interpreter.get_input_details(), verbose)
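
The tflite_metrics.py hunks show that TFLiteMetrics loads the model through tf.lite.Interpreter with experimental_preserve_all_tensors enabled and then walks the input details. A minimal standalone sketch of that loading step, assuming the public TensorFlow Lite Interpreter API and a placeholder model.tflite path:

    import tensorflow as tf

    # "model.tflite" is a placeholder; any TensorFlow Lite flatbuffer will do.
    interpreter = tf.lite.Interpreter(
        model_path="model.tflite", experimental_preserve_all_tensors=True
    )
    interpreter.allocate_tensors()

    for detail in interpreter.get_input_details():
        print(detail["name"], detail["shape"], detail["dtype"])
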
diff --git a/src/mlia/nn/tensorflow/utils.py b/src/mlia/nn/tensorflow/utils.py
index 6250f56..7970329 100644
--- a/src/mlia/nn/tensorflow/utils.py
+++ b/src/mlia/nn/tensorflow/utils.py
@@ -63,7 +63,7 @@ def representative_tf_dataset(model: str) -> Callable:
def convert_to_tflite(model: tf.keras.Model, quantized: bool = False) -> Interpreter:
- """Convert Keras model to TFLite."""
+ """Convert Keras model to TensorFlow Lite."""
if not isinstance(model, tf.keras.Model):
raise Exception("Invalid model type")
@@ -83,7 +83,7 @@ def convert_to_tflite(model: tf.keras.Model, quantized: bool = False) -> Interpr
def convert_tf_to_tflite(model: str, quantized: bool = False) -> Interpreter:
- """Convert TensorFlow model to TFLite."""
+ """Convert TensorFlow model to TensorFlow Lite."""
if not isinstance(model, str):
raise Exception("Invalid model type")
@@ -109,15 +109,15 @@ def save_keras_model(model: tf.keras.Model, save_path: str | Path) -> None:
def save_tflite_model(model: tf.lite.TFLiteConverter, save_path: str | Path) -> None:
- """Save TFLite model at provided path."""
+ """Save TensorFlow Lite model at provided path."""
with open(save_path, "wb") as file:
file.write(model)
def is_tflite_model(model: str | Path) -> bool:
- """Check if model type is supported by TFLite API.
+ """Check if model type is supported by TensorFlow Lite API.
- TFLite model is indicated by the model file extension .tflite
+ TensorFlow Lite model is indicated by the model file extension .tflite
"""
model_path = Path(model)
return model_path.suffix == ".tflite"
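
The utils.py hunks cover the Keras/TensorFlow to TensorFlow Lite conversion helpers and the extension-based is_tflite_model check. A minimal sketch of the same convert-and-save path, assuming the standard tf.lite.TFLiteConverter API; the real convert_to_tflite in utils.py also handles quantization options that are outside this diff:

    from __future__ import annotations

    from pathlib import Path

    import tensorflow as tf

    def keras_to_tflite_file(model: tf.keras.Model, save_path: str | Path) -> None:
        """Convert a Keras model to TensorFlow Lite and write the flatbuffer to disk."""
        converter = tf.lite.TFLiteConverter.from_keras_model(model)
        tflite_bytes = converter.convert()
        with open(save_path, "wb") as file:
            file.write(tflite_bytes)

    def is_tflite_model(model: str | Path) -> bool:
        """A TensorFlow Lite model is indicated by the .tflite file extension."""
        return Path(model).suffix == ".tflite"
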
diff --git a/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json b/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json
index 5c44ebc..7bc12c7 100644
--- a/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json
+++ b/src/mlia/resources/backend_configs/systems/corstone-300-vht/backend-config.json
@@ -16,7 +16,7 @@
"run": [
{
"name": "--data",
- "description": "Full file name for a custom model. Model must be in TFLite format compiled with Vela.",
+ "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
"values": [],
"alias": "input_file"
},
@@ -52,7 +52,7 @@
"run": [
{
"name": "--data",
- "description": "Full file name for a custom model. Model must be in TFLite format compiled with Vela.",
+ "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
"values": [],
"alias": "input_file"
},
diff --git a/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json b/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json
index 41d2fd0..c27c6f5 100644
--- a/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json
+++ b/src/mlia/resources/backend_configs/systems/corstone-300/backend-config.json
@@ -16,7 +16,7 @@
"run": [
{
"name": "--data",
- "description": "Full file name for a custom model. Model must be in TFLite format compiled with Vela.",
+ "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
"values": [],
"alias": "input_file"
},
@@ -52,7 +52,7 @@
"run": [
{
"name": "--data",
- "description": "Full file name for a custom model. Model must be in TFLite format compiled with Vela.",
+ "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
"values": [],
"alias": "input_file"
},
diff --git a/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json b/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json
index 3ea9a6a..bbcadfd 100644
--- a/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json
+++ b/src/mlia/resources/backend_configs/systems/corstone-310-vht/backend-config.json
@@ -16,7 +16,7 @@
"run": [
{
"name": "--data",
- "description": "Full file name for a custom model. Model must be in TFLite format compiled with Vela.",
+ "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
"values": [],
"alias": "input_file"
},
diff --git a/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json b/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json
index d043a2d..e0b1d1d 100644
--- a/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json
+++ b/src/mlia/resources/backend_configs/systems/corstone-310/backend-config.json
@@ -16,7 +16,7 @@
"run": [
{
"name": "--data",
- "description": "Full file name for a custom model. Model must be in TFLite format compiled with Vela.",
+ "description": "Full file name for a custom model. Model must be in TensorFlow Lite format compiled with Vela.",
"values": [],
"alias": "input_file"
},
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json
index 7ee5e00..4856d27 100644
--- a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json
+++ b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json
@@ -1,7 +1,7 @@
[
{
"name": "Generic Inference Runner: Ethos-U55/65 Shared SRAM",
- "description": "This application allows running inferences using custom NN TFLite models on Ethos-U. No data pre-/post-processing is executed.",
+ "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
"supported_systems": [
{
"name": "Corstone-300: Cortex-M55+Ethos-U55"
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json
index 51ff429..3b512ff 100644
--- a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json
+++ b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json
@@ -1,7 +1,7 @@
[
{
"name": "Generic Inference Runner: Ethos-U55 SRAM",
- "description": "This application allows running inferences using custom NN TFLite models on Ethos-U. No data pre-/post-processing is executed.",
+ "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
"supported_systems": [
{
"name": "Corstone-300: Cortex-M55+Ethos-U55"
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U65-Dedicated_Sram-TA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U65-Dedicated_Sram-TA/backend-config.json
index b59c85e..fc0569f 100644
--- a/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U65-Dedicated_Sram-TA/backend-config.json
+++ b/src/mlia/resources/backends/applications/inference_runner-sse-300-22.05.01-ethos-U65-Dedicated_Sram-TA/backend-config.json
@@ -1,7 +1,7 @@
[
{
"name": "Generic Inference Runner: Ethos-U65 Dedicated SRAM",
- "description": "This application allows running inferences using custom NN TFLite models on Ethos-U. No data pre-/post-processing is executed.",
+ "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
"supported_systems": [
{
"name": "Corstone-300: Cortex-M55+Ethos-U65"
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json
index 69c5e60..0bdbd27 100644
--- a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json
+++ b/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Shared_Sram-TA/backend-config.json
@@ -1,7 +1,7 @@
[
{
"name": "Generic Inference Runner: Ethos-U55/65 Shared SRAM",
- "description": "This application allows running inferences using custom NN TFLite models on Ethos-U. No data pre-/post-processing is executed.",
+ "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
"supported_systems": [
{
"name": "Corstone-310: Cortex-M85+Ethos-U55"
diff --git a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json b/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json
index fbe4a16..a56e49d 100644
--- a/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json
+++ b/src/mlia/resources/backends/applications/inference_runner-sse-310-22.05.01-ethos-U55-Sram_Only-TA/backend-config.json
@@ -1,7 +1,7 @@
[
{
"name": "Generic Inference Runner: Ethos-U55 SRAM",
- "description": "This application allows running inferences using custom NN TFLite models on Ethos-U. No data pre-/post-processing is executed.",
+ "description": "This application allows running inferences using custom NN TensorFlow Lite models on Ethos-U. No data pre-/post-processing is executed.",
"supported_systems": [
{
"name": "Corstone-310: Cortex-M85+Ethos-U55"
diff --git a/src/mlia/tools/vela_wrapper.py b/src/mlia/tools/vela_wrapper.py
index 47c15e9..00d2f2c 100644
--- a/src/mlia/tools/vela_wrapper.py
+++ b/src/mlia/tools/vela_wrapper.py
@@ -275,7 +275,7 @@ class VelaCompiler: # pylint: disable=too-many-instance-attributes
@staticmethod
def _read_model(model: str | Path) -> tuple[Graph, NetworkType]:
- """Read TFLite model."""
+ """Read TensorFlow Lite model."""
try:
model_path = str(model) if isinstance(model, Path) else model
@@ -448,7 +448,7 @@ def run_on_npu(operator: Op) -> NpuSupported:
a particular operator is supported to run on NPU.
There are two groups of checks:
- - general TFLite constraints
+ - general TensorFlow Lite constraints
- operator specific constraints
If an operator is not supported on NPU then this function