author    Benjamin Klimczak <benjamin.klimczak@arm.com>  2022-11-28 13:36:03 +0000
committer Benjamin Klimczak <benjamin.klimczak@arm.com>  2022-12-06 17:05:37 +0000
commit    0241f96fce1bbfa11f21bfbfa161b51f3170a51b (patch)
tree      f6ceae63670dd58f0b58de0ea55e039eddca3b50
parent    6a88ee5315b4ce5b023370c1e55e48bf9f2b6f67 (diff)
download  mlia-0241f96fce1bbfa11f21bfbfa161b51f3170a51b.tar.gz
MLIA-669 Upgrade dependencies with Vela 3.6
With Vela 3.6 we are able to remove the special treatment of aarch64 in our
dependencies, i.e.:
- upgrade Vela to version 3.6, which resolves a compatibility issue for
  aarch64 present in 3.4 and 3.5.
- upgrade to TensorFlow 2.10, which now supports aarch64 (therefore making
  it obsolete to use 'tensorflow-aarch64').

Change-Id: I86508b667b5ccb55bfd11dcae9defc54e5ef74de
-rw-r--r--  setup.cfg                                    | 11
-rw-r--r--  src/mlia/backend/vela/compiler.py            | 14
-rw-r--r--  tests/test_backend_vela_compiler.py          |  6
-rw-r--r--  tests/test_nn_tensorflow_tflite_metrics.py   |  6
-rw-r--r--  tests/test_target_ethos_u_config.py          |  2
5 files changed, 19 insertions(+), 20 deletions(-)
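Note: after installing with the new pins, the resolved versions can be sanity-checked. The snippet below is a minimal sketch, not part of this change; the package names are taken from setup.cfg and the check assumes a standard Python >= 3.8 environment.

    # Minimal post-install check for the upgraded pins (illustration only).
    from importlib.metadata import version

    for package in ("tensorflow", "tensorflow-model-optimization", "ethos-u-vela"):
        print(package, version(package))
    # Expected per setup.cfg: tensorflow ~= 2.10.1, ethos-u-vela ~= 3.6.0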
diff --git a/setup.cfg b/setup.cfg
index a37a53c..44d7387 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -28,17 +28,10 @@ python_requires = >=3.8
package_dir =
= src
packages = find_namespace:
-# On aarch64 machine, tensorflow does not pin the numpy version but
-# vela does. The pre-built aarch64 tensorflow 2.8.2 uses a numpy whose
-# version is conflicting with ethos-u-vela version 3.4.0. Therefore,
-# we downgraded both tensorflow and vela versions to make sure there is
-# no numpy version conflict.
install_requires =
- tensorflow-aarch64~=2.7.3; platform_machine=="aarch64"
- tensorflow~=2.9.2; platform_machine=="x86_64"
+ tensorflow~=2.10.1
tensorflow-model-optimization~=0.7.3
- ethos-u-vela~=3.3.0; platform_machine=="aarch64"
- ethos-u-vela~=3.5.0; platform_machine=="x86_64"
+ ethos-u-vela~=3.6.0
requests
rich
sh
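Note: the removed install_requires entries relied on PEP 508 environment markers to select per-architecture packages. As an illustration only (assuming the third-party 'packaging' library is available), such a marker can be evaluated directly:

    # Evaluates the marker that previously guarded the aarch64-specific pins.
    from packaging.markers import Marker

    marker = Marker('platform_machine == "aarch64"')
    print(marker.evaluate())  # True only when running on an aarch64 machine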
diff --git a/src/mlia/backend/vela/compiler.py b/src/mlia/backend/vela/compiler.py
index 3d3847a..b62df24 100644
--- a/src/mlia/backend/vela/compiler.py
+++ b/src/mlia/backend/vela/compiler.py
@@ -89,7 +89,7 @@ class VelaCompilerOptions: # pylint: disable=too-many-instance-attributes
tensor_allocator: TensorAllocatorType = "HillClimb"
cpu_tensor_alignment: int = Tensor.AllocationQuantum
optimization_strategy: OptimizationStrategyType = "Performance"
- output_dir: str | None = None
+ output_dir: str = "output"
recursion_limit: int = 1000
@@ -131,6 +131,8 @@ class VelaCompiler: # pylint: disable=too-many-instance-attributes
if not nng:
raise Exception("Unable to read model")
+ output_basename = f"{self.output_dir}/{nng.name}"
+
try:
arch = self._architecture_features()
compiler_options = self._compiler_options()
@@ -140,7 +142,12 @@ class VelaCompiler: # pylint: disable=too-many-instance-attributes
logger, stdout_level=logging.DEBUG, stderr_level=logging.DEBUG
):
compiler_driver(
- nng, arch, compiler_options, scheduler_options, network_type
+ nng,
+ arch,
+ compiler_options,
+ scheduler_options,
+ network_type,
+ output_basename,
)
return OptimizedModel(nng, arch, compiler_options, scheduler_options)
@@ -186,9 +193,8 @@ class VelaCompiler: # pylint: disable=too-many-instance-attributes
@staticmethod
def _read_model(model: str | Path) -> tuple[Graph, NetworkType]:
"""Read TensorFlow Lite model."""
+ model_path = str(model) if isinstance(model, Path) else model
try:
- model_path = str(model) if isinstance(model, Path) else model
-
with redirect_output(
logger, stdout_level=logging.DEBUG, stderr_level=logging.DEBUG
):
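Note: with Vela 3.6 the compiler_driver call takes an additional output basename, built above as f"{self.output_dir}/{nng.name}". A hedged, equivalent way to build it with pathlib is sketched below; the helper name and model name are hypothetical and not part of the change.

    from pathlib import Path

    def make_output_basename(output_dir: str, model_name: str) -> str:
        # Equivalent to f"{output_dir}/{model_name}", with the path
        # separator handled by pathlib.
        return str(Path(output_dir) / model_name)

    # e.g. make_output_basename("output", "sample_model") -> "output/sample_model"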
diff --git a/tests/test_backend_vela_compiler.py b/tests/test_backend_vela_compiler.py
index ff07c74..20121d6 100644
--- a/tests/test_backend_vela_compiler.py
+++ b/tests/test_backend_vela_compiler.py
@@ -27,7 +27,7 @@ def test_default_vela_compiler() -> None:
assert default_compiler.tensor_allocator == TensorAllocator.HillClimb
assert default_compiler.cpu_tensor_alignment == 16
assert default_compiler.optimization_strategy == OptimizationStrategy.Performance
- assert default_compiler.output_dir is None
+ assert default_compiler.output_dir == "output"
assert default_compiler.get_config() == {
"accelerator_config": "ethos-u55-256",
@@ -86,7 +86,7 @@ def test_vela_compiler_with_parameters(test_resources_path: Path) -> None:
tensor_allocator="Greedy",
cpu_tensor_alignment=4,
optimization_strategy="Size",
- output_dir="output",
+ output_dir="custom_output",
)
compiler = VelaCompiler(compiler_options)
@@ -99,7 +99,7 @@ def test_vela_compiler_with_parameters(test_resources_path: Path) -> None:
assert compiler.tensor_allocator == TensorAllocator.Greedy
assert compiler.cpu_tensor_alignment == 4
assert compiler.optimization_strategy == OptimizationStrategy.Size
- assert compiler.output_dir == "output"
+ assert compiler.output_dir == "custom_output"
assert compiler.get_config() == {
"accelerator_config": "ethos-u65-256",
diff --git a/tests/test_nn_tensorflow_tflite_metrics.py b/tests/test_nn_tensorflow_tflite_metrics.py
index 0e4c79c..e8d7c09 100644
--- a/tests/test_nn_tensorflow_tflite_metrics.py
+++ b/tests/test_nn_tensorflow_tflite_metrics.py
@@ -34,10 +34,10 @@ def _sample_keras_model() -> tf.keras.Model:
def _sparse_binary_keras_model() -> tf.keras.Model:
def get_sparse_weights(shape: list[int]) -> np.ndarray:
weights = np.zeros(shape)
- with np.nditer(weights, op_flags=["writeonly"]) as weight_iterator:
- for idx, value in enumerate(weight_iterator):
+ with np.nditer(weights, op_flags=[["writeonly"]]) as weight_it:
+ for idx, value in enumerate(weight_it):
if idx % 2 == 0:
- value[...] = 1.0
+ value[...] = 1.0 # type: ignore
return weights
keras_model = _sample_keras_model()
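Note: op_flags is now passed as a list of per-operand flag lists, matching NumPy's documented "list of list of str" form for nditer (and the stricter typing hinted at by the added "# type: ignore"). A standalone sketch of the same pattern, outside the test:

    import numpy as np

    weights = np.zeros(6)
    # One flag list per operand; "writeonly" permits in-place assignment.
    with np.nditer(weights, op_flags=[["writeonly"]]) as weight_it:
        for idx, value in enumerate(weight_it):
            if idx % 2 == 0:
                value[...] = 1.0
    print(weights)  # [1. 0. 1. 0. 1. 0.]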
diff --git a/tests/test_target_ethos_u_config.py b/tests/test_target_ethos_u_config.py
index 6ccd5ce..3160190 100644
--- a/tests/test_target_ethos_u_config.py
+++ b/tests/test_target_ethos_u_config.py
@@ -28,7 +28,7 @@ def test_compiler_options_default_init() -> None:
assert opts.tensor_allocator == "HillClimb"
assert opts.cpu_tensor_alignment == 16
assert opts.optimization_strategy == "Performance"
- assert opts.output_dir is None
+ assert opts.output_dir == "output"
def test_ethosu_target() -> None: