aboutsummaryrefslogtreecommitdiff
path: root/src/mlia/devices/ethosu/performance.py
diff options
context:
space:
mode:
authorDmitrii Agibov <dmitrii.agibov@arm.com>2022-10-24 15:08:08 +0100
committerDmitrii Agibov <dmitrii.agibov@arm.com>2022-10-26 17:08:13 +0100
commit58a65fee574c00329cf92b387a6d2513dcbf6100 (patch)
tree47e3185f78b4298ab029785ddee68456e44cac10 /src/mlia/devices/ethosu/performance.py
parent9d34cb72d45a6d0a2ec1063ebf32536c1efdba75 (diff)
downloadmlia-58a65fee574c00329cf92b387a6d2513dcbf6100.tar.gz
MLIA-433 Add TensorFlow Lite compatibility check
- Add ability to intercept low level TensorFlow output - Produce advice for the models that could not be converted to the TensorFlow Lite format - Refactor utility functions for TensorFlow Lite conversion - Add TensorFlow Lite compatibility checker Change-Id: I47d120d2619ced7b143bc92c5184515b81c0220d
Diffstat (limited to 'src/mlia/devices/ethosu/performance.py')
-rw-r--r--src/mlia/devices/ethosu/performance.py114
1 file changed, 57 insertions, 57 deletions
diff --git a/src/mlia/devices/ethosu/performance.py b/src/mlia/devices/ethosu/performance.py
index acc82e0..431dd89 100644
--- a/src/mlia/devices/ethosu/performance.py
+++ b/src/mlia/devices/ethosu/performance.py
@@ -17,6 +17,7 @@ from mlia.devices.ethosu.config import EthosUConfiguration
from mlia.nn.tensorflow.config import get_tflite_model
from mlia.nn.tensorflow.config import ModelConfiguration
from mlia.nn.tensorflow.optimizations.select import OptimizationSettings
+from mlia.utils.logging import log_action
logger = logging.getLogger(__name__)
@@ -125,25 +126,24 @@ class VelaPerformanceEstimator(
def estimate(self, model: Path | ModelConfiguration) -> MemoryUsage:
"""Estimate performance."""
- logger.info("Getting the memory usage metrics ...")
-
- model_path = (
- Path(model.model_path) if isinstance(model, ModelConfiguration) else model
- )
-
- vela_perf_metrics = vela.estimate_performance(
- model_path, self.device.compiler_options
- )
-
- memory_usage = MemoryUsage(
- vela_perf_metrics.sram_memory_area_size,
- vela_perf_metrics.dram_memory_area_size,
- vela_perf_metrics.unknown_memory_area_size,
- vela_perf_metrics.on_chip_flash_memory_area_size,
- vela_perf_metrics.off_chip_flash_memory_area_size,
- )
- logger.info("Done\n")
- return memory_usage
+ with log_action("Getting the memory usage metrics ..."):
+ model_path = (
+ Path(model.model_path)
+ if isinstance(model, ModelConfiguration)
+ else model
+ )
+
+ vela_perf_metrics = vela.estimate_performance(
+ model_path, self.device.compiler_options
+ )
+
+ return MemoryUsage(
+ vela_perf_metrics.sram_memory_area_size,
+ vela_perf_metrics.dram_memory_area_size,
+ vela_perf_metrics.unknown_memory_area_size,
+ vela_perf_metrics.on_chip_flash_memory_area_size,
+ vela_perf_metrics.off_chip_flash_memory_area_size,
+ )
class CorstonePerformanceEstimator(
@@ -161,44 +161,44 @@ class CorstonePerformanceEstimator(
def estimate(self, model: Path | ModelConfiguration) -> NPUCycles:
"""Estimate performance."""
- logger.info("Getting the performance metrics for '%s' ...", self.backend)
- logger.info(
- "WARNING: This task may require several minutes (press ctrl-c to interrupt)"
- )
-
- model_path = (
- Path(model.model_path) if isinstance(model, ModelConfiguration) else model
- )
-
- optimized_model_path = self.context.get_model_path(
- f"{model_path.stem}_vela.tflite"
- )
-
- vela.optimize_model(
- model_path, self.device.compiler_options, optimized_model_path
- )
-
- model_info = backend_manager.ModelInfo(model_path=optimized_model_path)
- device_info = backend_manager.DeviceInfo(
- device_type=self.device.target, # type: ignore
- mac=self.device.mac,
- )
-
- corstone_perf_metrics = backend_manager.estimate_performance(
- model_info, device_info, self.backend
- )
-
- npu_cycles = NPUCycles(
- corstone_perf_metrics.npu_active_cycles,
- corstone_perf_metrics.npu_idle_cycles,
- corstone_perf_metrics.npu_total_cycles,
- corstone_perf_metrics.npu_axi0_rd_data_beat_received,
- corstone_perf_metrics.npu_axi0_wr_data_beat_written,
- corstone_perf_metrics.npu_axi1_rd_data_beat_received,
- )
-
- logger.info("Done\n")
- return npu_cycles
+ with log_action(f"Getting the performance metrics for '{self.backend}' ..."):
+ logger.info(
+ "WARNING: This task may require several minutes "
+ "(press ctrl-c to interrupt)"
+ )
+
+ model_path = (
+ Path(model.model_path)
+ if isinstance(model, ModelConfiguration)
+ else model
+ )
+
+ optimized_model_path = self.context.get_model_path(
+ f"{model_path.stem}_vela.tflite"
+ )
+
+ vela.optimize_model(
+ model_path, self.device.compiler_options, optimized_model_path
+ )
+
+ model_info = backend_manager.ModelInfo(model_path=optimized_model_path)
+ device_info = backend_manager.DeviceInfo(
+ device_type=self.device.target, # type: ignore
+ mac=self.device.mac,
+ )
+
+ corstone_perf_metrics = backend_manager.estimate_performance(
+ model_info, device_info, self.backend
+ )
+
+ return NPUCycles(
+ corstone_perf_metrics.npu_active_cycles,
+ corstone_perf_metrics.npu_idle_cycles,
+ corstone_perf_metrics.npu_total_cycles,
+ corstone_perf_metrics.npu_axi0_rd_data_beat_received,
+ corstone_perf_metrics.npu_axi0_wr_data_beat_written,
+ corstone_perf_metrics.npu_axi1_rd_data_beat_received,
+ )
class EthosUPerformanceEstimator(