diff options
author | Benjamin Klimczak <benjamin.klimczak@arm.com> | 2022-11-28 13:36:03 +0000 |
---|---|---|
committer | Benjamin Klimczak <benjamin.klimczak@arm.com> | 2022-12-06 17:05:37 +0000 |
commit | 0241f96fce1bbfa11f21bfbfa161b51f3170a51b (patch) | |
tree | f6ceae63670dd58f0b58de0ea55e039eddca3b50 /src/mlia/backend/vela/compiler.py | |
parent | 6a88ee5315b4ce5b023370c1e55e48bf9f2b6f67 (diff) | |
download | mlia-0241f96fce1bbfa11f21bfbfa161b51f3170a51b.tar.gz |
MLIA-669 Upgrade dependencies with Vela 3.6
With Vela 3.6 we are able to remove the special treatment of aarch64
in our dependencies, i.e.
- upgrade Vela to version 3.6, which resolves a compatibility issue for
  aarch64 present in versions 3.4 and 3.5.
- upgrade to TensorFlow 2.10 which now supports aarch64 (therefore
making it obsolete to use 'tensorflow-aarch64').
Change-Id: I86508b667b5ccb55bfd11dcae9defc54e5ef74de
Diffstat (limited to 'src/mlia/backend/vela/compiler.py')
-rw-r--r-- | src/mlia/backend/vela/compiler.py | 14 |
1 file changed, 10 insertions, 4 deletions
diff --git a/src/mlia/backend/vela/compiler.py b/src/mlia/backend/vela/compiler.py index 3d3847a..b62df24 100644 --- a/src/mlia/backend/vela/compiler.py +++ b/src/mlia/backend/vela/compiler.py @@ -89,7 +89,7 @@ class VelaCompilerOptions: # pylint: disable=too-many-instance-attributes tensor_allocator: TensorAllocatorType = "HillClimb" cpu_tensor_alignment: int = Tensor.AllocationQuantum optimization_strategy: OptimizationStrategyType = "Performance" - output_dir: str | None = None + output_dir: str = "output" recursion_limit: int = 1000 @@ -131,6 +131,8 @@ class VelaCompiler: # pylint: disable=too-many-instance-attributes if not nng: raise Exception("Unable to read model") + output_basename = f"{self.output_dir}/{nng.name}" + try: arch = self._architecture_features() compiler_options = self._compiler_options() @@ -140,7 +142,12 @@ class VelaCompiler: # pylint: disable=too-many-instance-attributes logger, stdout_level=logging.DEBUG, stderr_level=logging.DEBUG ): compiler_driver( - nng, arch, compiler_options, scheduler_options, network_type + nng, + arch, + compiler_options, + scheduler_options, + network_type, + output_basename, ) return OptimizedModel(nng, arch, compiler_options, scheduler_options) @@ -186,9 +193,8 @@ class VelaCompiler: # pylint: disable=too-many-instance-attributes @staticmethod def _read_model(model: str | Path) -> tuple[Graph, NetworkType]: """Read TensorFlow Lite model.""" + model_path = str(model) if isinstance(model, Path) else model try: - model_path = str(model) if isinstance(model, Path) else model - with redirect_output( logger, stdout_level=logging.DEBUG, stderr_level=logging.DEBUG ): |