author      Benjamin Klimczak <benjamin.klimczak@arm.com>   2022-07-11 12:33:42 +0100
committer   Benjamin Klimczak <benjamin.klimczak@arm.com>   2022-07-26 14:08:21 +0100
commit      5d81f37de09efe10f90512e50252be9c36925fcf (patch)
tree        b4d7cdfd051da0a6e882bdfcf280fd7ca7b39e57 /tests/test_devices_ethosu_advice_generation.py
parent      7899b908c1fe6d86b92a80f3827ddd0ac05b674b (diff)
download    mlia-5d81f37de09efe10f90512e50252be9c36925fcf.tar.gz
MLIA-551 Rework remains of AIET architecture
Re-factoring the code base to further merge the old AIET code into MLIA.

- Remove last traces of the backend type 'tool'
- Controlled systems removed, including SSH protocol, controller, RunningCommand, locks etc.
- Build command / build dir and deploy functionality removed from Applications and Systems
- Moving working_dir()
- Replace module 'output_parser' with new module 'output_consumer' and merge Base64 parsing into it (see the sketch below)
- Change the output consumption to optionally remove (i.e. actually consume) lines
- Use Base64 parsing in GenericInferenceOutputParser, replacing the regex-based parsing and remove the now unused regex parsing
- Remove AIET reporting
- Pre-install applications by moving them to src/mlia/resources/backends
- Rename aiet-config.json to backend-config.json
- Move tests from tests/mlia/ to tests/
- Adapt unit tests to code changes
- Dependencies removed: paramiko, filelock, psutil
- Fix bug in corstone.py: The wrong resource directory was used which broke the functionality to download backends.
- Use f-string formatting.
- Use logging instead of print.

Change-Id: I768bc3bb6b2eda57d219ad01be4a8e0a74167d76
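The rework described above replaces the old regex-based output parsing with an "output consumer" that decodes Base64-encoded payloads and can actually consume (remove) the matched lines from the backend output. The following is only a rough sketch of that idea, assuming a simple marker-prefixed payload format; none of the names below (Base64OutputConsumer, feed, filter_output, the "<metrics>" marker) come from the mlia code base.

# Illustrative sketch only; class, method and marker names are assumptions,
# not the actual mlia.backend.output_consumer API.
import base64
import json
from typing import Any, List


class Base64OutputConsumer:
    """Collect Base64-encoded payloads found in backend output lines."""

    marker = "<metrics>"  # hypothetical marker; the real delimiter may differ

    def __init__(self) -> None:
        self.parsed: List[Any] = []

    def feed(self, line: str) -> bool:
        """Return True if the line carried a payload and should be consumed."""
        stripped = line.strip()
        if not stripped.startswith(self.marker):
            return False
        payload = stripped[len(self.marker):].strip()
        try:
            decoded = base64.b64decode(payload, validate=True)
            self.parsed.append(json.loads(decoded))
        except ValueError:
            return False  # not a valid payload: keep the line in the output
        return True


def filter_output(lines: List[str], consumer: Base64OutputConsumer) -> List[str]:
    """Drop the lines the consumer claimed; keep everything else."""
    return [line for line in lines if not consumer.feed(line)]

A caller would feed each raw backend output line through the consumer and keep only the lines it did not claim, e.g. filter_output(raw_lines, Base64OutputConsumer()), while the decoded payloads accumulate in consumer.parsed.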
Diffstat (limited to 'tests/test_devices_ethosu_advice_generation.py')
-rw-r--r--   tests/test_devices_ethosu_advice_generation.py   482
1 file changed, 482 insertions, 0 deletions
diff --git a/tests/test_devices_ethosu_advice_generation.py b/tests/test_devices_ethosu_advice_generation.py
new file mode 100644
index 0000000..5d37376
--- /dev/null
+++ b/tests/test_devices_ethosu_advice_generation.py
@@ -0,0 +1,482 @@
+# SPDX-FileCopyrightText: Copyright 2022, Arm Limited and/or its affiliates.
+# SPDX-License-Identifier: Apache-2.0
+"""Tests for Ethos-U advice generation."""
+from typing import List
+
+import pytest
+
+from mlia.cli.helpers import CLIActionResolver
+from mlia.core.advice_generation import Advice
+from mlia.core.common import AdviceCategory
+from mlia.core.common import DataItem
+from mlia.core.context import ExecutionContext
+from mlia.core.helpers import ActionResolver
+from mlia.core.helpers import APIActionResolver
+from mlia.devices.ethosu.advice_generation import EthosUAdviceProducer
+from mlia.devices.ethosu.advice_generation import EthosUStaticAdviceProducer
+from mlia.devices.ethosu.data_analysis import AllOperatorsSupportedOnNPU
+from mlia.devices.ethosu.data_analysis import HasCPUOnlyOperators
+from mlia.devices.ethosu.data_analysis import HasUnsupportedOnNPUOperators
+from mlia.devices.ethosu.data_analysis import OptimizationDiff
+from mlia.devices.ethosu.data_analysis import OptimizationResults
+from mlia.devices.ethosu.data_analysis import PerfMetricDiff
+from mlia.nn.tensorflow.optimizations.select import OptimizationSettings
+
+
+@pytest.mark.parametrize(
+ "input_data, advice_category, action_resolver, expected_advice",
+ [
+ [
+ AllOperatorsSupportedOnNPU(),
+ AdviceCategory.OPERATORS,
+ APIActionResolver(),
+ [
+ Advice(
+ [
+ "You don't have any unsupported operators, your model will "
+ "run completely on NPU."
+ ]
+ )
+ ],
+ ],
+ [
+ AllOperatorsSupportedOnNPU(),
+ AdviceCategory.OPERATORS,
+ CLIActionResolver(
+ {
+ "target_profile": "sample_target",
+ "model": "sample_model.tflite",
+ }
+ ),
+ [
+ Advice(
+ [
+ "You don't have any unsupported operators, your model will "
+ "run completely on NPU.",
+ "Check the estimated performance by running the "
+ "following command: ",
+ "mlia performance --target-profile sample_target "
+ "sample_model.tflite",
+ ]
+ )
+ ],
+ ],
+ [
+ HasCPUOnlyOperators(cpu_only_ops=["OP1", "OP2", "OP3"]),
+ AdviceCategory.OPERATORS,
+ APIActionResolver(),
+ [
+ Advice(
+ [
+ "You have at least 3 operators that is CPU only: "
+ "OP1,OP2,OP3.",
+ "Using operators that are supported by the NPU will "
+ "improve performance.",
+ ]
+ )
+ ],
+ ],
+ [
+ HasCPUOnlyOperators(cpu_only_ops=["OP1", "OP2", "OP3"]),
+ AdviceCategory.OPERATORS,
+ CLIActionResolver({}),
+ [
+ Advice(
+ [
+ "You have at least 3 operators that is CPU only: "
+ "OP1,OP2,OP3.",
+ "Using operators that are supported by the NPU will "
+ "improve performance.",
+ "For guidance on supported operators, run: mlia operators "
+ "--supported-ops-report",
+ ]
+ )
+ ],
+ ],
+ [
+ HasUnsupportedOnNPUOperators(npu_unsupported_ratio=0.4),
+ AdviceCategory.OPERATORS,
+ APIActionResolver(),
+ [
+ Advice(
+ [
+ "You have 40% of operators that cannot be placed on the NPU.",
+ "For better performance, please review the reasons reported "
+ "in the table, and adjust the model accordingly "
+ "where possible.",
+ ]
+ )
+ ],
+ ],
+ [
+ HasUnsupportedOnNPUOperators(npu_unsupported_ratio=0.4),
+ AdviceCategory.OPERATORS,
+ CLIActionResolver({}),
+ [
+ Advice(
+ [
+ "You have 40% of operators that cannot be placed on the NPU.",
+ "For better performance, please review the reasons reported "
+ "in the table, and adjust the model accordingly "
+ "where possible.",
+ ]
+ )
+ ],
+ ],
+ [
+ OptimizationResults(
+ [
+ OptimizationDiff(
+ opt_type=[OptimizationSettings("pruning", 0.5, None)],
+ opt_diffs={
+ "sram": PerfMetricDiff(100, 150),
+ "dram": PerfMetricDiff(100, 50),
+ "on_chip_flash": PerfMetricDiff(100, 100),
+ "off_chip_flash": PerfMetricDiff(100, 100),
+ "npu_total_cycles": PerfMetricDiff(10, 5),
+ },
+ ),
+ ]
+ ),
+ AdviceCategory.OPTIMIZATION,
+ APIActionResolver(),
+ [
+ Advice(
+ [
+ "With the selected optimization (pruning: 0.5)",
+ "- You have achieved 50.00% performance improvement in "
+ "DRAM used (KB)",
+ "- You have achieved 50.00% performance improvement in "
+ "NPU total cycles",
+ "- SRAM used (KB) have degraded by 50.00%",
+ "You can try to push the optimization target higher "
+ "(e.g. pruning: 0.6) "
+ "to check if those results can be further improved.",
+ ]
+ ),
+ Advice(
+ [
+ "The applied tooling techniques have an impact "
+ "on accuracy. Additional hyperparameter tuning may be required "
+ "after any optimization."
+ ]
+ ),
+ ],
+ ],
+ [
+ OptimizationResults(
+ [
+ OptimizationDiff(
+ opt_type=[OptimizationSettings("pruning", 0.5, None)],
+ opt_diffs={
+ "sram": PerfMetricDiff(100, 150),
+ "dram": PerfMetricDiff(100, 50),
+ "on_chip_flash": PerfMetricDiff(100, 100),
+ "off_chip_flash": PerfMetricDiff(100, 100),
+ "npu_total_cycles": PerfMetricDiff(10, 5),
+ },
+ ),
+ ]
+ ),
+ AdviceCategory.OPTIMIZATION,
+ CLIActionResolver({"model": "sample_model.h5"}),
+ [
+ Advice(
+ [
+ "With the selected optimization (pruning: 0.5)",
+ "- You have achieved 50.00% performance improvement in "
+ "DRAM used (KB)",
+ "- You have achieved 50.00% performance improvement in "
+ "NPU total cycles",
+ "- SRAM used (KB) have degraded by 50.00%",
+ "You can try to push the optimization target higher "
+ "(e.g. pruning: 0.6) "
+ "to check if those results can be further improved.",
+ "For more info: mlia optimization --help",
+ "Optimization command: "
+ "mlia optimization --optimization-type pruning "
+ "--optimization-target 0.6 sample_model.h5",
+ ]
+ ),
+ Advice(
+ [
+ "The applied tooling techniques have an impact "
+ "on accuracy. Additional hyperparameter tuning may be required "
+ "after any optimization."
+ ]
+ ),
+ ],
+ ],
+ [
+ OptimizationResults(
+ [
+ OptimizationDiff(
+ opt_type=[
+ OptimizationSettings("pruning", 0.5, None),
+ OptimizationSettings("clustering", 32, None),
+ ],
+ opt_diffs={
+ "sram": PerfMetricDiff(100, 150),
+ "dram": PerfMetricDiff(100, 50),
+ "on_chip_flash": PerfMetricDiff(100, 100),
+ "off_chip_flash": PerfMetricDiff(100, 100),
+ "npu_total_cycles": PerfMetricDiff(10, 5),
+ },
+ ),
+ ]
+ ),
+ AdviceCategory.OPTIMIZATION,
+ APIActionResolver(),
+ [
+ Advice(
+ [
+ "With the selected optimization (pruning: 0.5, clustering: 32)",
+ "- You have achieved 50.00% performance improvement in "
+ "DRAM used (KB)",
+ "- You have achieved 50.00% performance improvement in "
+ "NPU total cycles",
+ "- SRAM used (KB) have degraded by 50.00%",
+ "You can try to push the optimization target higher "
+ "(e.g. pruning: 0.6 and/or clustering: 16) "
+ "to check if those results can be further improved.",
+ ]
+ ),
+ Advice(
+ [
+ "The applied tooling techniques have an impact "
+ "on accuracy. Additional hyperparameter tuning may be required "
+ "after any optimization."
+ ]
+ ),
+ ],
+ ],
+ [
+ OptimizationResults(
+ [
+ OptimizationDiff(
+ opt_type=[
+ OptimizationSettings("clustering", 2, None),
+ ],
+ opt_diffs={
+ "sram": PerfMetricDiff(100, 150),
+ "dram": PerfMetricDiff(100, 50),
+ "on_chip_flash": PerfMetricDiff(100, 100),
+ "off_chip_flash": PerfMetricDiff(100, 100),
+ "npu_total_cycles": PerfMetricDiff(10, 5),
+ },
+ ),
+ ]
+ ),
+ AdviceCategory.OPTIMIZATION,
+ APIActionResolver(),
+ [
+ Advice(
+ [
+ "With the selected optimization (clustering: 2)",
+ "- You have achieved 50.00% performance improvement in "
+ "DRAM used (KB)",
+ "- You have achieved 50.00% performance improvement in "
+ "NPU total cycles",
+ "- SRAM used (KB) have degraded by 50.00%",
+ ]
+ ),
+ Advice(
+ [
+ "The applied tooling techniques have an impact "
+ "on accuracy. Additional hyperparameter tuning may be required "
+ "after any optimization."
+ ]
+ ),
+ ],
+ ],
+ [
+ OptimizationResults(
+ [
+ OptimizationDiff(
+ opt_type=[OptimizationSettings("pruning", 0.5, None)],
+ opt_diffs={
+ "sram": PerfMetricDiff(100, 150),
+ "dram": PerfMetricDiff(100, 150),
+ "on_chip_flash": PerfMetricDiff(100, 150),
+ "off_chip_flash": PerfMetricDiff(100, 150),
+ "npu_total_cycles": PerfMetricDiff(10, 100),
+ },
+ ),
+ ]
+ ),
+ AdviceCategory.OPTIMIZATION,
+ APIActionResolver(),
+ [
+ Advice(
+ [
+ "With the selected optimization (pruning: 0.5)",
+ "- DRAM used (KB) have degraded by 50.00%",
+ "- SRAM used (KB) have degraded by 50.00%",
+ "- On chip flash used (KB) have degraded by 50.00%",
+ "- Off chip flash used (KB) have degraded by 50.00%",
+ "- NPU total cycles have degraded by 900.00%",
+ "The performance seems to have degraded after "
+ "applying the selected optimizations, "
+ "try exploring different optimization types/targets.",
+ ]
+ ),
+ Advice(
+ [
+ "The applied tooling techniques have an impact "
+ "on accuracy. Additional hyperparameter tuning may be required "
+ "after any optimization."
+ ]
+ ),
+ ],
+ ],
+ [
+ OptimizationResults(
+ [
+ OptimizationDiff(
+ opt_type=[OptimizationSettings("pruning", 0.5, None)],
+ opt_diffs={
+ "sram": PerfMetricDiff(100, 150),
+ "dram": PerfMetricDiff(100, 150),
+ "on_chip_flash": PerfMetricDiff(100, 150),
+ "off_chip_flash": PerfMetricDiff(100, 150),
+ "npu_total_cycles": PerfMetricDiff(10, 100),
+ },
+ ),
+ OptimizationDiff(
+ opt_type=[OptimizationSettings("pruning", 0.6, None)],
+ opt_diffs={
+ "sram": PerfMetricDiff(100, 150),
+ "dram": PerfMetricDiff(100, 150),
+ "on_chip_flash": PerfMetricDiff(100, 150),
+ "off_chip_flash": PerfMetricDiff(100, 150),
+ "npu_total_cycles": PerfMetricDiff(10, 100),
+ },
+ ),
+ ]
+ ),
+ AdviceCategory.OPTIMIZATION,
+ APIActionResolver(),
+ [], # no advice for more than one optimization result
+ ],
+ ],
+)
+def test_ethosu_advice_producer(
+ tmpdir: str,
+ input_data: DataItem,
+ expected_advice: List[Advice],
+ advice_category: AdviceCategory,
+ action_resolver: ActionResolver,
+) -> None:
+ """Test Ethos-U Advice producer."""
+ producer = EthosUAdviceProducer()
+
+ context = ExecutionContext(
+ advice_category=advice_category,
+ working_dir=tmpdir,
+ action_resolver=action_resolver,
+ )
+
+ producer.set_context(context)
+ producer.produce_advice(input_data)
+
+ assert producer.get_advice() == expected_advice
+
+
+@pytest.mark.parametrize(
+ "advice_category, action_resolver, expected_advice",
+ [
+ [
+ AdviceCategory.ALL,
+ None,
+ [],
+ ],
+ [
+ AdviceCategory.OPERATORS,
+ None,
+ [],
+ ],
+ [
+ AdviceCategory.PERFORMANCE,
+ APIActionResolver(),
+ [
+ Advice(
+ [
+ "You can improve the inference time by using only operators "
+ "that are supported by the NPU.",
+ ]
+ ),
+ Advice(
+ [
+ "Check if you can improve the performance by applying "
+ "tooling techniques to your model."
+ ]
+ ),
+ ],
+ ],
+ [
+ AdviceCategory.PERFORMANCE,
+ CLIActionResolver({"model": "test_model.h5"}),
+ [
+ Advice(
+ [
+ "You can improve the inference time by using only operators "
+ "that are supported by the NPU.",
+ "Try running the following command to verify that:",
+ "mlia operators test_model.h5",
+ ]
+ ),
+ Advice(
+ [
+ "Check if you can improve the performance by applying "
+ "tooling techniques to your model.",
+ "For example: mlia optimization --optimization-type "
+ "pruning,clustering --optimization-target 0.5,32 "
+ "test_model.h5",
+ "For more info: mlia optimization --help",
+ ]
+ ),
+ ],
+ ],
+ [
+ AdviceCategory.OPTIMIZATION,
+ APIActionResolver(),
+ [
+ Advice(
+ [
+ "For better performance, make sure that all the operators "
+ "of your final TFLite model are supported by the NPU.",
+ ]
+ )
+ ],
+ ],
+ [
+ AdviceCategory.OPTIMIZATION,
+ CLIActionResolver({"model": "test_model.h5"}),
+ [
+ Advice(
+ [
+ "For better performance, make sure that all the operators "
+ "of your final TFLite model are supported by the NPU.",
+ "For more details, run: mlia operators --help",
+ ]
+ )
+ ],
+ ],
+ ],
+)
+def test_ethosu_static_advice_producer(
+ tmpdir: str,
+ advice_category: AdviceCategory,
+ action_resolver: ActionResolver,
+ expected_advice: List[Advice],
+) -> None:
+ """Test static advice generation."""
+ producer = EthosUStaticAdviceProducer()
+
+ context = ExecutionContext(
+ advice_category=advice_category,
+ working_dir=tmpdir,
+ action_resolver=action_resolver,
+ )
+ producer.set_context(context)
+ assert producer.get_advice() == expected_advice